Datasets:

Column schema (field name, dtype, value range):

  bibtex_url                  string   (lengths 41–53)
  proceedings                 string   (lengths 38–50)
  bibtext                     string   (lengths 528–3.02k)
  abstract                    string   (lengths 17–2.35k)
  authors                     sequence (lengths 1–44)
  title                       string   (lengths 18–190)
  id                          string   (lengths 7–19)
  arxiv_id                    string   (lengths 0–10)
  GitHub                      sequence (lengths 1–1)
  paper_page                  string   (528 distinct values)
  n_linked_authors            int64    (-1 to 15)
  upvotes                     int64    (-1 to 77)
  num_comments                int64    (-1 to 10)
  n_authors                   int64    (-1 to 52)
  Models                      sequence (lengths 0–100)
  Datasets                    sequence (lengths 0–15)
  Spaces                      sequence (lengths 0–46)
  paper_page_exists_pre_conf  int64    (0 to 1)
  type                        string   (2 distinct values)
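Because this schema mirrors what the Hugging Face dataset viewer reports, the records listed below can be loaded and filtered programmatically. The following is a minimal sketch using the `datasets` library; the repository id and the filter criteria are placeholders (assumptions), not part of the original listing.

```python
# Minimal sketch: load a dataset with the schema above and keep rows whose
# paper page existed on Hugging Face before the conference and which link
# at least one model, dataset, or space. The repo id is hypothetical.
from datasets import load_dataset

ds = load_dataset("someuser/acl-2023-paper-metadata", split="train")  # placeholder repo id

linked = ds.filter(
    lambda row: row["paper_page_exists_pre_conf"] == 1
    and (len(row["Models"]) + len(row["Datasets"]) + len(row["Spaces"])) > 0
)

for row in linked:
    print(row["title"], row["paper_page"], row["upvotes"])
```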
https://aclanthology.org/2023.conll-babylm.8.bib
https://aclanthology.org/2023.conll-babylm.8/
@inproceedings{edman-bylinina-2023-much, title = "Too Much Information: Keeping Training Simple for {B}aby{LM}s", author = "Edman, Lukas and Bylinina, Lisa", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.8", doi = "10.18653/v1/2023.conll-babylm.8", pages = "89--97", }
No abstract found
[ "Edman, Lukas", "Bylinina, Lisa" ]
Too Much Information: Keeping Training Simple for BabyLMs
conll-babylm.8
2311.01955
[ "" ]
https://huggingface.co/papers/2311.01955
1
0
0
2
[]
[]
[]
1
Poster
https://aclanthology.org/2023.conll-babylm.9.bib
https://aclanthology.org/2023.conll-babylm.9/
@inproceedings{chobey-etal-2023-training, title = "Can training neural language models on a curriculum with developmentally plausible data improve alignment with human reading behavior?", author = "Chobey, Aryaman and Smith, Oliver and Wang, Anzi and Prasad, Grusha", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.9", doi = "10.18653/v1/2023.conll-babylm.9", pages = "98--111", }
No abstract found
[ "Chobey, Aryaman", "Smith, Oliver", "Wang, Anzi", "Prasad, Grusha" ]
Can training neural language models on a curriculum with developmentally plausible data improve alignment with human reading behavior?
conll-babylm.9
2311.18761
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.10.bib
https://aclanthology.org/2023.conll-babylm.10/
@inproceedings{martinez-etal-2023-climb, title = "{CLIMB} {--} Curriculum Learning for Infant-inspired Model Building", author = "Martinez, Richard Diehl and McGovern, Hope and Goriely, Zebulon and Davis, Christopher and Caines, Andrew and Buttery, Paula and Beinborn, Lisa", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.10", doi = "10.18653/v1/2023.conll-babylm.10", pages = "112--127", }
No abstract found
[ "Martinez, Richard Diehl", "McGovern, Hope", "Goriely, Zebulon", "Davis, Christopher", "Caines, Andrew", "Buttery, Paula", "Beinborn, Lisa" ]
CLIMB – Curriculum Learning for Infant-inspired Model Building
conll-babylm.10
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.11.bib
https://aclanthology.org/2023.conll-babylm.11/
@inproceedings{amariucai-warstadt-2023-acquiring, title = "Acquiring Linguistic Knowledge from Multimodal Input", author = "Amariucai, Theodor and Warstadt, Alexander Scott", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.11", doi = "10.18653/v1/2023.conll-babylm.11", pages = "128--141", }
No abstract found
[ "Amariucai, Theodor", "Warstadt, Alex", "er Scott" ]
Acquiring Linguistic Knowledge from Multimodal Input
conll-babylm.11
2402.17936
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.12.bib
https://aclanthology.org/2023.conll-babylm.12/
@inproceedings{steuer-etal-2023-large, title = "Large {GPT}-like Models are Bad Babies: A Closer Look at the Relationship between Linguistic Competence and Psycholinguistic Measures", author = "Steuer, Julius and Mosbach, Marius and Klakow, Dietrich", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.12", doi = "10.18653/v1/2023.conll-babylm.12", pages = "142--157", }
No abstract found
[ "Steuer, Julius", "Mosbach, Marius", "Klakow, Dietrich" ]
Large GPT-like Models are Bad Babies: A Closer Look at the Relationship between Linguistic Competence and Psycholinguistic Measures
conll-babylm.12
2311.04547
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.13.bib
https://aclanthology.org/2023.conll-babylm.13/
@inproceedings{zhang-etal-2023-babys, title = "Baby{'}s {C}o{T}hought: Leveraging Large Language Models for Enhanced Reasoning in Compact Models", author = {Zhang, Zheyu and Yang, Han and Ma, Bolei and R{\"u}gamer, David and Nie, Ercong}, editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.13", doi = "10.18653/v1/2023.conll-babylm.13", pages = "158--170", }
No abstract found
[ "Zhang, Zheyu", "Yang, Han", "Ma, Bolei", "R{\\\"u}gamer, David", "Nie, Ercong" ]
Baby's CoThought: Leveraging Large Language Models for Enhanced Reasoning in Compact Models
conll-babylm.13
2308.01684
[ "https://github.com/oooranz/baby-cothought" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.14.bib
https://aclanthology.org/2023.conll-babylm.14/
@inproceedings{veysel-cagatan-2023-toddlerberta, title = "{T}oddler{BERT}a: Exploiting {B}aby{BERT}a for Grammar Learning and Language Understanding", author = {Veysel {\c{C}}a{\u{g}}atan, {\"O}mer}, editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.14", doi = "10.18653/v1/2023.conll-babylm.14", pages = "171--179", }
No abstract found
[ "Veysel {\\c{C}}a{\\u{g}}atan, {\\\"O}mer" ]
ToddlerBERTa: Exploiting BabyBERTa for Grammar Learning and Language Understanding
conll-babylm.14
2308.16336
[ "" ]
https://huggingface.co/papers/2308.16336
0
0
2
1
[]
[]
[]
1
Poster
https://aclanthology.org/2023.conll-babylm.15.bib
https://aclanthology.org/2023.conll-babylm.15/
@inproceedings{thoma-etal-2023-cogmemlm, title = "{C}og{M}em{LM}: Human-Like Memory Mechanisms Improve Performance and Cognitive Plausibility of {LLM}s", author = "Thoma, Lukas and Weyers, Ivonne and {\c{C}}ano, Erion and Schweter, Stefan and Mueller, Jutta L and Roth, Benjamin", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.15", doi = "10.18653/v1/2023.conll-babylm.15", pages = "180--185", }
No abstract found
[ "Thoma, Lukas", "Weyers, Ivonne", "{\\c{C}}ano, Erion", "Schweter, Stefan", "Mueller, Jutta L", "Roth, Benjamin" ]
CogMemLM: Human-Like Memory Mechanisms Improve Performance and Cognitive Plausibility of LLMs
conll-babylm.15
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.16.bib
https://aclanthology.org/2023.conll-babylm.16/
@inproceedings{zhao-etal-2023-babystories, title = "{B}aby{S}tories: Can Reinforcement Learning Teach Baby Language Models to Write Better Stories?", author = "Zhao, Xingmeng and Wang, Tongnian and Osborn, Sheri and Rios, Anthony", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.16", doi = "10.18653/v1/2023.conll-babylm.16", pages = "186--197", }
No abstract found
[ "Zhao, Xingmeng", "Wang, Tongnian", "Osborn, Sheri", "Rios, Anthony" ]
BabyStories: Can Reinforcement Learning Teach Baby Language Models to Write Better Stories?
conll-babylm.16
2310.16681
[ "https://github.com/zephyr1022/babystories-utsa" ]
https://huggingface.co/papers/2310.16681
0
0
0
4
[]
[]
[]
1
Poster
https://aclanthology.org/2023.conll-babylm.17.bib
https://aclanthology.org/2023.conll-babylm.17/
@inproceedings{debenedetto-2023-byte, title = "Byte-ranked Curriculum Learning for {B}aby{LM} Strict-small Shared Task 2023", author = "DeBenedetto, Justin", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.17", doi = "10.18653/v1/2023.conll-babylm.17", pages = "198--206", }
No abstract found
[ "DeBenedetto, Justin" ]
Byte-ranked Curriculum Learning for BabyLM Strict-small Shared Task 2023
conll-babylm.17
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.18.bib
https://aclanthology.org/2023.conll-babylm.18/
@inproceedings{cheng-etal-2023-mcgill, title = "{M}c{G}ill {B}aby{LM} Shared Task Submission: The Effects of Data Formatting and Structural Biases", author = "Cheng, Ziling and Aralikatte, Rahul and Porada, Ian and Spinoso-Di Piano, Cesare and Cheung, Jackie CK", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.18", doi = "10.18653/v1/2023.conll-babylm.18", pages = "207--220", }
No abstract found
[ "Cheng, Ziling", "Aralikatte, Rahul", "Porada, Ian", "Spinoso-Di Piano, Cesare", "Cheung, Jackie CK" ]
McGill BabyLM Shared Task Submission: The Effects of Data Formatting and Structural Biases
conll-babylm.18
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.19.bib
https://aclanthology.org/2023.conll-babylm.19/
@inproceedings{samuel-2023-mean, title = "Mean {BERT}s make erratic language teachers: the effectiveness of latent bootstrapping in low-resource settings", author = "Samuel, David", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.19", doi = "10.18653/v1/2023.conll-babylm.19", pages = "221--237", }
No abstract found
[ "Samuel, David" ]
Mean BERTs make erratic language teachers: the effectiveness of latent bootstrapping in low-resource settings
conll-babylm.19
2310.19420
[ "https://github.com/ltgoslo/boot-bert" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.20.bib
https://aclanthology.org/2023.conll-babylm.20/
@inproceedings{georges-gabriel-charpentier-samuel-2023-layers, title = "Not all layers are equally as important: Every Layer Counts {BERT}", author = "Georges Gabriel Charpentier, Lucas and Samuel, David", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.20", doi = "10.18653/v1/2023.conll-babylm.20", pages = "238--252", }
No abstract found
[ "Georges Gabriel Charpentier, Lucas", "Samuel, David" ]
Not all layers are equally as important: Every Layer Counts BERT
conll-babylm.20
2311.02265
[ "" ]
https://huggingface.co/papers/2311.02265
0
1
0
2
[]
[]
[]
1
Poster
https://aclanthology.org/2023.conll-babylm.21.bib
https://aclanthology.org/2023.conll-babylm.21/
@inproceedings{wolf-etal-2023-whisbert, title = "{W}his{BERT}: Multimodal Text-Audio Language Modeling on 100{M} Words", author = "Wolf, Lukas and Kotar, Klemen and Tuckute, Greta and Hosseini, Eghbal and I. Regev, Tamar and Gotlieb Wilcox, Ethan and Warstadt, Alexander Scott", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.21", doi = "10.18653/v1/2023.conll-babylm.21", pages = "253--258", }
No abstract found
[ "Wolf, Lukas", "Kotar, Klemen", "Tuckute, Greta", "Hosseini, Eghbal", "I. Regev, Tamar", "Gotlieb Wilcox, Ethan", "Warstadt, Alex", "er Scott" ]
WhisBERT: Multimodal Text-Audio Language Modeling on 100M Words
conll-babylm.21
2312.02931
[ "https://github.com/lu-wo/whisbert" ]
https://huggingface.co/papers/2312.02931
5
6
1
7
[]
[]
[]
1
Poster
https://aclanthology.org/2023.conll-babylm.22.bib
https://aclanthology.org/2023.conll-babylm.22/
@inproceedings{hong-etal-2023-surprisal, title = "A surprisal oracle for active curriculum language modeling", author = "Hong, Xudong and Lo{\'a}iciga, Sharid and Sayeed, Asad", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.22", doi = "10.18653/v1/2023.conll-babylm.22", pages = "259--268", }
No abstract found
[ "Hong, Xudong", "Lo{\\'a}iciga, Sharid", "Sayeed, Asad" ]
A surprisal oracle for active curriculum language modeling
conll-babylm.22
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.23.bib
https://aclanthology.org/2023.conll-babylm.23/
@inproceedings{mi-2023-mmi01, title = "Mmi01 at The {B}aby{LM} Challenge: Linguistically Motivated Curriculum Learning for Pretraining in Low-Resource Settings", author = "Mi, Maggie", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.23", doi = "10.18653/v1/2023.conll-babylm.23", pages = "269--278", }
No abstract found
[ "Mi, Maggie" ]
Mmi01 at The BabyLM Challenge: Linguistically Motivated Curriculum Learning for Pretraining in Low-Resource Settings
conll-babylm.23
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.24.bib
https://aclanthology.org/2023.conll-babylm.24/
@inproceedings{timiryasov-tastet-2023-baby, title = "Baby Llama: knowledge distillation from an ensemble of teachers trained on a small dataset with no performance penalty", author = "Timiryasov, Inar and Tastet, Jean-Loup", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.24", doi = "10.18653/v1/2023.conll-babylm.24", pages = "279--289", }
No abstract found
[ "Timiryasov, Inar", "Tastet, Jean-Loup" ]
Baby Llama: knowledge distillation from an ensemble of teachers trained on a small dataset with no performance penalty
conll-babylm.24
2308.02019
[ "https://github.com/timinar/babyllama" ]
https://huggingface.co/papers/2308.02019
0
0
0
2
[ "timinar/baby-llama-58m", "andrijdavid/baby-llama-58m-GGUF", "RichardErkhov/timinar_-_baby-llama-58m-4bits", "RichardErkhov/timinar_-_baby-llama-58m-8bits" ]
[]
[]
1
Poster
https://aclanthology.org/2023.conll-babylm.25.bib
https://aclanthology.org/2023.conll-babylm.25/
@inproceedings{oba-etal-2023-babylm, title = "{B}aby{LM} Challenge: Curriculum learning based on sentence complexity approximating language acquisition", author = "Oba, Miyu and Haga, Akari and Fukatsu, Akiyo and Oseki, Yohei", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.25", doi = "10.18653/v1/2023.conll-babylm.25", pages = "290--297", }
No abstract found
[ "Oba, Miyu", "Haga, Akari", "Fukatsu, Akiyo", "Oseki, Yohei" ]
BabyLM Challenge: Curriculum learning based on sentence complexity approximating language acquisition
conll-babylm.25
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.26.bib
https://aclanthology.org/2023.conll-babylm.26/
@inproceedings{berend-2023-better, title = "Better Together: Jointly Using Masked Latent Semantic Modeling and Masked Language Modeling for Sample Efficient Pre-training", author = "Berend, G{\'a}bor", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.26", doi = "10.18653/v1/2023.conll-babylm.26", pages = "298--307", }
No abstract found
[ "Berend, G{\\'a}bor" ]
Better Together: Jointly Using Masked Latent Semantic Modeling and Masked Language Modeling for Sample Efficient Pre-training
conll-babylm.26
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.27.bib
https://aclanthology.org/2023.conll-babylm.27/
@inproceedings{govindarajan-etal-2023-lil, title = "Lil-Bevo: Explorations of Strategies for Training Language Models in More Humanlike Ways", author = "Govindarajan, Venkata S and Rodriguez, Juan Diego and Bostrom, Kaj and Mahowald, Kyle", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.27", doi = "10.18653/v1/2023.conll-babylm.27", pages = "308--316", }
No abstract found
[ "Govindarajan, Venkata S", "Rodriguez, Juan Diego", "Bostrom, Kaj", "Mahowald, Kyle" ]
Lil-Bevo: Explorations of Strategies for Training Language Models in More Humanlike Ways
conll-babylm.27
2310.17591
[ "https://github.com/venkatasg/lil-bevo" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.28.bib
https://aclanthology.org/2023.conll-babylm.28/
@inproceedings{xiao-etal-2023-towards, title = "Towards more Human-like Language Models based on Contextualizer Pretraining Strategy", author = "Xiao, Chenghao and Hudson, G Thomas and Al Moubayed, Noura", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.28", doi = "10.18653/v1/2023.conll-babylm.28", pages = "317--326", }
No abstract found
[ "Xiao, Chenghao", "Hudson, G Thomas", "Al Moubayed, Noura" ]
Towards more Human-like Language Models based on Contextualizer Pretraining Strategy
conll-babylm.28
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.29.bib
https://aclanthology.org/2023.conll-babylm.29/
@inproceedings{momen-etal-2023-increasing, title = "Increasing The Performance of Cognitively Inspired Data-Efficient Language Models via Implicit Structure Building", author = "Momen, Omar and Arps, David and Kallmeyer, Laura", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.29", doi = "10.18653/v1/2023.conll-babylm.29", pages = "327--338", }
No abstract found
[ "Momen, Omar", "Arps, David", "Kallmeyer, Laura" ]
Increasing The Performance of Cognitively Inspired Data-Efficient Language Models via Implicit Structure Building
conll-babylm.29
2310.20589
[ "https://github.com/omarmomen14/structformer-babylm" ]
https://huggingface.co/papers/2310.20589
1
0
0
3
[ "omarmomen/structroberta_sx_final", "omarmomen/babylm_tokenizer_32k", "omarmomen/structroberta_s1_final", "omarmomen/structroberta_s2_final", "omarmomen/structroberta_sx2_final", "omarmomen/transformer_base_final_2", "omarmomen/structformer_s1_final_with_pos", "omarmomen/structformer_s2_final_with_pos", "omarmomen/roberta_base_32k_final" ]
[]
[]
1
Poster
https://aclanthology.org/2023.conll-babylm.30.bib
https://aclanthology.org/2023.conll-babylm.30/
@inproceedings{bhardwaj-etal-2023-pre, title = "Pre-training {LLM}s using human-like development data corpus", author = "Bhardwaj, Khushi and Shah, Raj Sanjay and Varma, Sashank", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.30", doi = "10.18653/v1/2023.conll-babylm.30", pages = "339--345", }
No abstract found
[ "Bhardwaj, Khushi", "Shah, Raj Sanjay", "Varma, Sashank" ]
Pre-training LLMs using human-like development data corpus
conll-babylm.30
2311.04666
[ "" ]
https://huggingface.co/papers/2311.04666
1
0
0
3
[]
[]
[]
1
Poster
https://aclanthology.org/2023.conll-babylm.31.bib
https://aclanthology.org/2023.conll-babylm.31/
@inproceedings{opper-etal-2023-effect, title = "On the effect of curriculum learning with developmental data for grammar acquisition", author = "Opper, Mattia and Morrison, J. and Siddharth, N.", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.31", doi = "10.18653/v1/2023.conll-babylm.31", pages = "346--355", }
No abstract found
[ "Opper, Mattia", "Morrison, J.", "Siddharth, N." ]
On the effect of curriculum learning with developmental data for grammar acquisition
conll-babylm.31
2311.00128
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.conll-babylm.32.bib
https://aclanthology.org/2023.conll-babylm.32/
@inproceedings{borazjanizadeh-2023-optimizing, title = "Optimizing {GPT}-2 Pretraining on {B}aby{LM} Corpus with Difficulty-based Sentence Reordering", author = "Borazjanizadeh, Nasim", editor = "Warstadt, Alex and Mueller, Aaron and Choshen, Leshem and Wilcox, Ethan and Zhuang, Chengxu and Ciro, Juan and Mosquera, Rafael and Paranjabe, Bhargavi and Williams, Adina and Linzen, Tal and Cotterell, Ryan", booktitle = "Proceedings of the BabyLM Challenge at the 27th Conference on Computational Natural Language Learning", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.conll-babylm.32", doi = "10.18653/v1/2023.conll-babylm.32", pages = "356--365", }
No abstract found
[ "Borazjanizadeh, Nasim" ]
Optimizing GPT-2 Pretraining on BabyLM Corpus with Difficulty-based Sentence Reordering
conll-babylm.32
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-main.1.bib
https://aclanthology.org/2023.crac-main.1/
@inproceedings{de-langhe-etal-2023-filling, title = "Filling in the Gaps: Efficient Event Coreference Resolution using Graph Autoencoder Networks", author = "De Langhe, Loic and De Clercq, Orphee and Hoste, Veronique", editor = "Ogrodniczuk, Maciej and Ng, Vincent and Pradhan, Sameer and Poesio, Massimo", booktitle = "Proceedings of The Sixth Workshop on Computational Models of Reference, Anaphora and Coreference (CRAC 2023)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.crac-main.1", doi = "10.18653/v1/2023.crac-main.1", pages = "1--7", }
No abstract found
[ "De Langhe, Loic", "De Clercq, Orphee", "Hoste, Veronique" ]
Filling in the Gaps: Efficient Event Coreference Resolution using Graph Autoencoder Networks
crac-main.1
2310.11965
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-main.2.bib
https://aclanthology.org/2023.crac-main.2/
@inproceedings{doosterlinck-etal-2023-caw, title = "{CAW}-coref: Conjunction-Aware Word-level Coreference Resolution", author = "D{'}Oosterlinck, Karel and Bitew, Semere Kiros and Papineau, Brandon and Potts, Christopher and Demeester, Thomas and Develder, Chris", editor = "Ogrodniczuk, Maciej and Ng, Vincent and Pradhan, Sameer and Poesio, Massimo", booktitle = "Proceedings of The Sixth Workshop on Computational Models of Reference, Anaphora and Coreference (CRAC 2023)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.crac-main.2", doi = "10.18653/v1/2023.crac-main.2", pages = "8--14", }
No abstract found
[ "D{'}Oosterlinck, Karel", "Bitew, Semere Kiros", "Papineau, Br", "on", "Potts, Christopher", "Demeester, Thomas", "Develder, Chris" ]
CAW-coref: Conjunction-Aware Word-level Coreference Resolution
crac-main.2
2310.06165
[ "https://github.com/kareldo/wl-coref" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-main.3.bib
https://aclanthology.org/2023.crac-main.3/
@inproceedings{wazni-sadrzadeh-2023-towards, title = "Towards Transparency in Coreference Resolution: A Quantum-Inspired Approach", author = "Wazni, Hadi and Sadrzadeh, Mehrnoosh", editor = "Ogrodniczuk, Maciej and Ng, Vincent and Pradhan, Sameer and Poesio, Massimo", booktitle = "Proceedings of The Sixth Workshop on Computational Models of Reference, Anaphora and Coreference (CRAC 2023)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.crac-main.3", doi = "10.18653/v1/2023.crac-main.3", pages = "15--27", }
No abstract found
[ "Wazni, Hadi", "Sadrzadeh, Mehrnoosh" ]
Towards Transparency in Coreference Resolution: A Quantum-Inspired Approach
crac-main.3
2312.00688
[ "https://github.com/hwazni/qcoref" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-main.4.bib
https://aclanthology.org/2023.crac-main.4/
@inproceedings{ye-etal-2023-scalar, title = "Scalar Anaphora: Annotating Degrees of Coreference in Text", author = "Ye, Bingyang and Tu, Jingxuan and Pustejovsky, James", editor = "Ogrodniczuk, Maciej and Ng, Vincent and Pradhan, Sameer and Poesio, Massimo", booktitle = "Proceedings of The Sixth Workshop on Computational Models of Reference, Anaphora and Coreference (CRAC 2023)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.crac-main.4", doi = "10.18653/v1/2023.crac-main.4", pages = "28--38", }
No abstract found
[ "Ye, Bingyang", "Tu, Jingxuan", "Pustejovsky, James" ]
Scalar Anaphora: Annotating Degrees of Coreference in Text
crac-main.4
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-main.5.bib
https://aclanthology.org/2023.crac-main.5/
@inproceedings{mullick-etal-2023-better, title = "Better Handling Coreference Resolution in Aspect Level Sentiment Classification by Fine-Tuning Language Models", author = "Mullick, Dhruv and Ghanem, Bilal and Fyshe, Alona", editor = "Ogrodniczuk, Maciej and Ng, Vincent and Pradhan, Sameer and Poesio, Massimo", booktitle = "Proceedings of The Sixth Workshop on Computational Models of Reference, Anaphora and Coreference (CRAC 2023)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.crac-main.5", doi = "10.18653/v1/2023.crac-main.5", pages = "39--47", }
No abstract found
[ "Mullick, Dhruv", "Ghanem, Bilal", "Fyshe, Alona" ]
Better Handling Coreference Resolution in Aspect Level Sentiment Classification by Fine-Tuning Language Models
crac-main.5
2307.05646
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-main.6.bib
https://aclanthology.org/2023.crac-main.6/
@inproceedings{simovic-chambers-2023-pragmatics, title = "The pragmatics of characters{'} mental perspectives in pronominal reference resolution", author = "Simovic, Tiana and Chambers, Craig", editor = "Ogrodniczuk, Maciej and Ng, Vincent and Pradhan, Sameer and Poesio, Massimo", booktitle = "Proceedings of The Sixth Workshop on Computational Models of Reference, Anaphora and Coreference (CRAC 2023)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.crac-main.6", doi = "10.18653/v1/2023.crac-main.6", pages = "48--50", }
No abstract found
[ "Simovic, Tiana", "Chambers, Craig" ]
The pragmatics of characters' mental perspectives in pronominal reference resolution
crac-main.6
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-main.7.bib
https://aclanthology.org/2023.crac-main.7/
@inproceedings{ates-etal-2023-marrs, title = "{MARRS}: Multimodal Reference Resolution System", author = "Ates, Halim Cagri and Bhargava, Shruti and Li, Site and Lu, Jiarui and Maddula, Siddhardha and Moniz, Joel Ruben Antony and Nalamalapu, Anil Kumar and Nguyen, Roman Hoang and Ozyildirim, Melis and Patel, Alkesh and Piraviperumal, Dhivya and Renkens, Vincent and Samal, Ankit and Tran, Thy and Tseng, Bo-Hsiang and Yu, Hong and Zhang, Yuan and Zou, Shirley", editor = "Ogrodniczuk, Maciej and Ng, Vincent and Pradhan, Sameer and Poesio, Massimo", booktitle = "Proceedings of The Sixth Workshop on Computational Models of Reference, Anaphora and Coreference (CRAC 2023)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.crac-main.7", doi = "10.18653/v1/2023.crac-main.7", pages = "51--58", }
No abstract found
[ "Ates, Halim Cagri", "Bhargava, Shruti", "Li, Site", "Lu, Jiarui", "Maddula, Siddhardha", "Moniz, Joel Ruben Antony", "Nalamalapu, Anil Kumar", "Nguyen, Roman Hoang", "Ozyildirim, Melis", "Patel, Alkesh", "Piraviperumal, Dhivya", "Renkens, Vincent", "Samal, Ankit", "Tran, Thy", "Tseng, Bo-Hsiang", "Yu, Hong", "Zhang, Yuan", "Zou, Shirley" ]
MARRS: Multimodal Reference Resolution System
crac-main.7
2311.01650
[ "" ]
https://huggingface.co/papers/2311.01650
0
2
0
18
[]
[]
[]
1
Poster
https://aclanthology.org/2023.crac-main.8.bib
https://aclanthology.org/2023.crac-main.8/
@inproceedings{okulska-wisnios-2023-towards, title = "Towards Harmful Erotic Content Detection through Coreference-Driven Contextual Analysis", author = "Okulska, Inez and Wisnios, Emilia", editor = "Ogrodniczuk, Maciej and Ng, Vincent and Pradhan, Sameer and Poesio, Massimo", booktitle = "Proceedings of The Sixth Workshop on Computational Models of Reference, Anaphora and Coreference (CRAC 2023)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.crac-main.8", doi = "10.18653/v1/2023.crac-main.8", pages = "59--70", }
No abstract found
[ "Okulska, Inez", "Wisnios, Emilia" ]
Towards Harmful Erotic Content Detection through Coreference-Driven Contextual Analysis
crac-main.8
2310.14325
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-main.9.bib
https://aclanthology.org/2023.crac-main.9/
@inproceedings{rim-pustejovsky-2023-integrated, title = "Integrated Annotation of Event Structure, Object States, and Entity Coreference", author = "Rim, Kyeongmin and Pustejovsky, James", editor = "Ogrodniczuk, Maciej and Ng, Vincent and Pradhan, Sameer and Poesio, Massimo", booktitle = "Proceedings of The Sixth Workshop on Computational Models of Reference, Anaphora and Coreference (CRAC 2023)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.crac-main.9", doi = "10.18653/v1/2023.crac-main.9", pages = "71--77", }
No abstract found
[ "Rim, Kyeongmin", "Pustejovsky, James" ]
Integrated Annotation of Event Structure, Object States, and Entity Coreference
crac-main.9
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-sharedtask.1.bib
https://aclanthology.org/2023.crac-sharedtask.1/
@inproceedings{zabokrtsky-etal-2023-findings, title = "Findings of the Second Shared Task on Multilingual Coreference Resolution", author = "{\v{Z}}abokrtsk{\'y}, Zden{\v{e}}k and Konopik, Miloslav and Nedoluzhko, Anna and Nov{\'a}k, Michal and Ogrodniczuk, Maciej and Popel, Martin and Prazak, Ondrej and Sido, Jakub and Zeman, Daniel", editor = "{\v{Z}}abokrtsk{\'y}, Zden{\v{e}}k and Ogrodniczuk, Maciej", booktitle = "Proceedings of the CRAC 2023 Shared Task on Multilingual Coreference Resolution", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.crac-sharedtask.1", doi = "10.18653/v1/2023.crac-sharedtask.1", pages = "1--18", abstract = "This paper summarizes the second edition of the shared task on multilingual coreference resolution, held with the CRAC 2023 workshop. Just like last year, participants of the shared task were to create trainable systems that detect mentions and group them based on identity coreference; however, this year{'}s edition uses a slightly different primary evaluation score, and is also broader in terms of covered languages: version 1.1 of the multilingual collection of harmonized coreference resources CorefUD was used as the source of training and evaluation data this time, with 17 datasets for 12 languages. 7 systems competed in this shared task.", }
This paper summarizes the second edition of the shared task on multilingual coreference resolution, held with the CRAC 2023 workshop. Just like last year, participants of the shared task were to create trainable systems that detect mentions and group them based on identity coreference; however, this year's edition uses a slightly different primary evaluation score, and is also broader in terms of covered languages: version 1.1 of the multilingual collection of harmonized coreference resources CorefUD was used as the source of training and evaluation data this time, with 17 datasets for 12 languages. 7 systems competed in this shared task.
[ "{\\v{Z}}abokrtsk{\\'y}, Zden{\\v{e}}k", "Konopik, Miloslav", "Nedoluzhko, Anna", "Nov{\\'a}k, Michal", "Ogrodniczuk, Maciej", "Popel, Martin", "Prazak, Ondrej", "Sido, Jakub", "Zeman, Daniel" ]
Findings of the Second Shared Task on Multilingual Coreference Resolution
crac-sharedtask.1
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-sharedtask.2.bib
https://aclanthology.org/2023.crac-sharedtask.2/
@inproceedings{skachkova-etal-2023-multilingual, title = "Multilingual coreference resolution: Adapt and Generate", author = "Skachkova, Natalia and Anikina, Tatiana and Mokhova, Anna", editor = "{\v{Z}}abokrtsk{\'y}, Zden{\v{e}}k and Ogrodniczuk, Maciej", booktitle = "Proceedings of the CRAC 2023 Shared Task on Multilingual Coreference Resolution", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.crac-sharedtask.2", doi = "10.18653/v1/2023.crac-sharedtask.2", pages = "19--33", abstract = "The paper presents two multilingual coreference resolution systems submitted for the CRAC Shared Task 2023. The DFKI-Adapt system achieves 61.86 F1 score on the shared task test data, outperforming the official baseline by 4.9 F1 points. This system uses a combination of different features and training settings, including character embeddings, adapter modules, joint pre-training and loss-based re-training. We provide evaluation for each of the settings on 12 different datasets and compare the results. The other submission DFKI-MPrompt uses a novel approach that involves prompting for mention generation. Although the scores achieved by this model are lower compared to the baseline, the method shows a new way of approaching the coreference task and provides good results with just five epochs of training.", }
The paper presents two multilingual coreference resolution systems submitted for the CRAC Shared Task 2023. The DFKI-Adapt system achieves 61.86 F1 score on the shared task test data, outperforming the official baseline by 4.9 F1 points. This system uses a combination of different features and training settings, including character embeddings, adapter modules, joint pre-training and loss-based re-training. We provide evaluation for each of the settings on 12 different datasets and compare the results. The other submission DFKI-MPrompt uses a novel approach that involves prompting for mention generation. Although the scores achieved by this model are lower compared to the baseline, the method shows a new way of approaching the coreference task and provides good results with just five epochs of training.
[ "Skachkova, Natalia", "Anikina, Tatiana", "Mokhova, Anna" ]
Multilingual coreference resolution: Adapt and Generate
crac-sharedtask.2
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-sharedtask.3.bib
https://aclanthology.org/2023.crac-sharedtask.3/
@inproceedings{pamay-arslan-etal-2023-neural, title = "Neural End-to-End Coreference Resolution using Morphological Information", author = {Pamay Arslan, Tu{\u{g}}ba and Acar, Kutay and Eryi{\u{g}}it, G{\"u}l{\c{s}}en}, editor = "{\v{Z}}abokrtsk{\'y}, Zden{\v{e}}k and Ogrodniczuk, Maciej", booktitle = "Proceedings of the CRAC 2023 Shared Task on Multilingual Coreference Resolution", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.crac-sharedtask.3", doi = "10.18653/v1/2023.crac-sharedtask.3", pages = "34--40", abstract = "In morphologically rich languages, words consist of morphemes containing deeper information in morphology, and thus such languages may necessitate the use of morpheme-level representations as well as word representations. This study introduces a neural multilingual end-to-end coreference resolution system by incorporating morphological information in transformer-based word embeddings on the baseline model. This proposed model participated in the Sixth Workshop on Computational Models of Reference, Anaphora and Coreference (CRAC 2023). Including morphological information explicitly into the coreference resolution improves the performance, especially in morphologically rich languages (e.g., Catalan, Hungarian, and Turkish). The introduced model outperforms the baseline system by 2.57 percentage points on average by obtaining 59.53{\%} CoNLL F-score.", }
In morphologically rich languages, words consist of morphemes containing deeper information in morphology, and thus such languages may necessitate the use of morpheme-level representations as well as word representations. This study introduces a neural multilingual end-to-end coreference resolution system by incorporating morphological information in transformer-based word embeddings on the baseline model. This proposed model participated in the Sixth Workshop on Computational Models of Reference, Anaphora and Coreference (CRAC 2023). Including morphological information explicitly into the coreference resolution improves the performance, especially in morphologically rich languages (e.g., Catalan, Hungarian, and Turkish). The introduced model outperforms the baseline system by 2.57 percentage points on average by obtaining 59.53% CoNLL F-score.
[ "Pamay Arslan, Tu{\\u{g}}ba", "Acar, Kutay", "Eryi{\\u{g}}it, G{\\\"u}l{\\c{s}}en" ]
Neural End-to-End Coreference Resolution using Morphological Information
crac-sharedtask.3
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-sharedtask.4.bib
https://aclanthology.org/2023.crac-sharedtask.4/
@inproceedings{straka-2023-ufal, title = "{{\'U}FAL} {C}or{P}ipe at {CRAC} 2023: Larger Context Improves Multilingual Coreference Resolution", author = "Straka, Milan", editor = "{\v{Z}}abokrtsk{\'y}, Zden{\v{e}}k and Ogrodniczuk, Maciej", booktitle = "Proceedings of the CRAC 2023 Shared Task on Multilingual Coreference Resolution", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.crac-sharedtask.4", doi = "10.18653/v1/2023.crac-sharedtask.4", pages = "41--51", abstract = "We present CorPipe, the winning entry to the CRAC 2023 Shared Task on Multilingual Coreference Resolution. Our system is an improved version of our earlier multilingual coreference pipeline, and it surpasses other participants by a large margin of 4.5 percent points. CorPipe first performs mention detection, followed by coreference linking via an antecedent-maximization approach on the retrieved spans. Both tasks are trained jointly on all available corpora using a shared pretrained language model. Our main improvements comprise inputs larger than 512 subwords and changing the mention decoding to support ensembling. The source code is available at https://github.com/ufal/crac2023-corpipe.", }
We present CorPipe, the winning entry to the CRAC 2023 Shared Task on Multilingual Coreference Resolution. Our system is an improved version of our earlier multilingual coreference pipeline, and it surpasses other participants by a large margin of 4.5 percent points. CorPipe first performs mention detection, followed by coreference linking via an antecedent-maximization approach on the retrieved spans. Both tasks are trained jointly on all available corpora using a shared pretrained language model. Our main improvements comprise inputs larger than 512 subwords and changing the mention decoding to support ensembling. The source code is available at https://github.com/ufal/crac2023-corpipe.
[ "Straka, Milan" ]
ÚFAL CorPipe at CRAC 2023: Larger Context Improves Multilingual Coreference Resolution
crac-sharedtask.4
2311.14391
[ "https://github.com/ufal/crac2023-corpipe" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.crac-sharedtask.5.bib
https://aclanthology.org/2023.crac-sharedtask.5/
@inproceedings{porada-cheung-2023-mcgill, title = "{M}c{G}ill at {CRAC} 2023: Multilingual Generalization of Entity-Ranking Coreference Resolution Models", author = "Porada, Ian and Cheung, Jackie Chi Kit", editor = "{\v{Z}}abokrtsk{\'y}, Zden{\v{e}}k and Ogrodniczuk, Maciej", booktitle = "Proceedings of the CRAC 2023 Shared Task on Multilingual Coreference Resolution", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.crac-sharedtask.5", doi = "10.18653/v1/2023.crac-sharedtask.5", pages = "52--57", abstract = "Our submission to the CRAC 2023 shared task, described herein, is an adapted entity-ranking model jointly trained on all 17 datasets spanning 12 languages. Our model outperforms the shared task baselines by a difference in F1 score of +8.47, achieving an ultimate F1 score of 65.43 and fourth place in the shared task. We explore design decisions related to data preprocessing, the pretrained encoder, and data mixing.", }
Our submission to the CRAC 2023 shared task, described herein, is an adapted entity-ranking model jointly trained on all 17 datasets spanning 12 languages. Our model outperforms the shared task baselines by a difference in F1 score of +8.47, achieving an ultimate F1 score of 65.43 and fourth place in the shared task. We explore design decisions related to data preprocessing, the pretrained encoder, and data mixing.
[ "Porada, Ian", "Cheung, Jackie Chi Kit" ]
McGill at CRAC 2023: Multilingual Generalization of Entity-Ranking Coreference Resolution Models
crac-sharedtask.5
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.1.bib
https://aclanthology.org/2023.gem-1.1/
@inproceedings{theron-2023-contextualizing, title = "Contextualizing the Limits of Model {\&} Evaluation Dataset Curation on Semantic Similarity Classification Tasks", author = "Theron, Daniel", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.1", pages = "1--8", abstract = "This paper demonstrates how the limitations of pre-trained models and open evaluation datasets factor into assessing the performance of binary semantic similarity classification tasks. As (1) end-user-facing documentation around the curation of these datasets and pre-trained model training regimes is often not easily accessible and (2) given the lower friction and higher demand to quickly deploy such systems in real-world contexts, our study reinforces prior work showing performance disparities across datasets, embedding techniques and distance metrics, while highlighting the importance of understanding how data is collected, curated and analyzed in semantic similarity classification.", }
This paper demonstrates how the limitations of pre-trained models and open evaluation datasets factor into assessing the performance of binary semantic similarity classification tasks. As (1) end-user-facing documentation around the curation of these datasets and pre-trained model training regimes is often not easily accessible and (2) given the lower friction and higher demand to quickly deploy such systems in real-world contexts, our study reinforces prior work showing performance disparities across datasets, embedding techniques and distance metrics, while highlighting the importance of understanding how data is collected, curated and analyzed in semantic similarity classification.
[ "Theron, Daniel" ]
Contextualizing the Limits of Model & Evaluation Dataset Curation on Semantic Similarity Classification Tasks
gem-1.1
2311.04927
[ "" ]
https://huggingface.co/papers/2311.04927
1
0
0
1
[]
[]
[]
1
Poster
https://aclanthology.org/2023.gem-1.2.bib
https://aclanthology.org/2023.gem-1.2/
@inproceedings{mendonca-etal-2023-dialogue, title = "Dialogue Quality and Emotion Annotations for Customer Support Conversations", author = "Mendonca, John and Pereira, Patr{\'\i}cia and Menezes, Miguel and Cabarr{\~a}o, Vera and Farinha, Ana C and Moniz, Helena and Lavie, Alon and Trancoso, Isabel", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.2", pages = "9--21", abstract = "Task-oriented conversational datasets often lack topic variability and linguistic diversity. However, with the advent of Large Language Models (LLMs) pretrained on extensive, multilingual and diverse text data, these limitations seem overcome. Nevertheless, their generalisability to different languages and domains in dialogue applications remains uncertain without benchmarking datasets. This paper presents a holistic annotation approach for emotion and conversational quality in the context of bilingual customer support conversations. By performing annotations that take into consideration the complete instances that compose a conversation, one can form a broader perspective of the dialogue as a whole. Furthermore, it provides a unique and valuable resource for the development of text classification models. To this end, we present benchmarks for Emotion Recognition and Dialogue Quality Estimation and show that further research is needed to leverage these models in a production setting.", }
Task-oriented conversational datasets often lack topic variability and linguistic diversity. However, with the advent of Large Language Models (LLMs) pretrained on extensive, multilingual and diverse text data, these limitations seem overcome. Nevertheless, their generalisability to different languages and domains in dialogue applications remains uncertain without benchmarking datasets. This paper presents a holistic annotation approach for emotion and conversational quality in the context of bilingual customer support conversations. By performing annotations that take into consideration the complete instances that compose a conversation, one can form a broader perspective of the dialogue as a whole. Furthermore, it provides a unique and valuable resource for the development of text classification models. To this end, we present benchmarks for Emotion Recognition and Dialogue Quality Estimation and show that further research is needed to leverage these models in a production setting.
[ "Mendonca, John", "Pereira, Patr{\\'\\i}cia", "Menezes, Miguel", "Cabarr{\\~a}o, Vera", "Farinha, Ana C", "Moniz, Helena", "Lavie, Alon", "Trancoso, Isabel" ]
Dialogue Quality and Emotion Annotations for Customer Support Conversations
gem-1.2
2311.13910
[ "https://github.com/johndmendonca/maia-dqe" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.3.bib
https://aclanthology.org/2023.gem-1.3/
@inproceedings{jensen-hojmark-2023-formalizing, title = "Formalizing content creation and evaluation methods for {AI}-generated social media content", author = "Jensen, Christian and H{\o}jmark, Axel", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.3", pages = "22--41", abstract = "This study explores the use of large language models (LLMs), such as ChatGPT and GPT-4, in creating high-quality text-based social media content for businesses on LinkedIn. We introduce a novel architecture incorporating external knowledge bases and a multi-step writing approach, which extracts facts from company websites to form a knowledge graph. Our method{'}s efficacy is assessed using the {``}Long-LinkedIn{''} evaluation dataset designed for long-form post generation. Results indicate that our iterative refinement significantly improves content quality. However, knowledge-enhanced prompts occasionally reduced quality due to potential formulation issues. LLM-based evaluations, particularly using ChatGPT, showcased potential as a less resource-intensive alternative to human assessments, with a notable alignment between the two evaluation techniques.", }
This study explores the use of large language models (LLMs), such as ChatGPT and GPT-4, in creating high-quality text-based social media content for businesses on LinkedIn. We introduce a novel architecture incorporating external knowledge bases and a multi-step writing approach, which extracts facts from company websites to form a knowledge graph. Our method{'}s efficacy is assessed using the {``}Long-LinkedIn{''} evaluation dataset designed for long-form post generation. Results indicate that our iterative refinement significantly improves content quality. However, knowledge-enhanced prompts occasionally reduced quality due to potential formulation issues. LLM-based evaluations, particularly using ChatGPT, showcased potential as a less resource-intensive alternative to human assessments, with a notable alignment between the two evaluation techniques.
[ "Jensen, Christian", "H{\\o}jmark, Axel" ]
Formalizing content creation and evaluation methods for AI-generated social media content
gem-1.3
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.4.bib
https://aclanthology.org/2023.gem-1.4/
@inproceedings{mehri-shwartz-2023-automatic, title = "Automatic Evaluation of Generative Models with Instruction Tuning", author = "Mehri, Shuhaib and Shwartz, Vered", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.4", pages = "42--52", abstract = "Automatic evaluation of natural language generation has long been an elusive goal in NLP. A recent paradigm fine-tunes pre-trained language models to emulate human judgements for a particular task and evaluation criterion. Inspired by the generalization ability of instruction-tuned models, we propose a learned metric based on instruction tuning. To test our approach, we collected HEAP, a dataset of human judgements across various NLG tasks and evaluation criteria. Our findings demonstrate that instruction tuning language models on HEAP yields good performance on many evaluation tasks, though some criteria are less trivial to learn than others. Further, jointly training on multiple tasks can yield additional performance improvements, which can be beneficial for future tasks with little to no human annotated data.", }
Automatic evaluation of natural language generation has long been an elusive goal in NLP. A recent paradigm fine-tunes pre-trained language models to emulate human judgements for a particular task and evaluation criterion. Inspired by the generalization ability of instruction-tuned models, we propose a learned metric based on instruction tuning. To test our approach, we collected HEAP, a dataset of human judgements across various NLG tasks and evaluation criteria. Our findings demonstrate that instruction tuning language models on HEAP yields good performance on many evaluation tasks, though some criteria are less trivial to learn than others. Further, jointly training on multiple tasks can yield additional performance improvements, which can be beneficial for future tasks with little to no human annotated data.
[ "Mehri, Shuhaib", "Shwartz, Vered" ]
Automatic Evaluation of Generative Models with Instruction Tuning
gem-1.4
2310.20072
[ "https://github.com/shuhaibm/heap" ]
https://huggingface.co/papers/2310.20072
0
0
0
2
[]
[]
[]
1
Poster
https://aclanthology.org/2023.gem-1.5.bib
https://aclanthology.org/2023.gem-1.5/
@inproceedings{du-etal-2023-effective, title = "Effective Proxy for Human Labeling: Ensemble Disagreement Scores in Large Language Models for Industrial {NLP}", author = "Du, Wei and Advani, Laksh and Gambhir, Yashmeet and Perry, Daniel and Shiralkar, Prashant and Xing, Zhengzheng and Colak, Aaron", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.5", pages = "53--61", abstract = "Large language models (LLMs) have demonstrated significant capability to generalize across a large number of NLP tasks. For industry applications, it is imperative to assess the performance of the LLM on unlabeled production data from time to time to validate for a real-world setting. Human labeling to assess model error requires considerable expense and time delay. Here we demonstrate that ensemble disagreement scores work well as a proxy for human labeling for language models in zero-shot, few-shot, and fine-tuned settings, per our evaluation on keyphrase extraction (KPE) task. We measure fidelity of the results by comparing to true error measured from human labeled ground truth. We contrast with the alternative of using another LLM as a source of machine labels, or {`}silver labels{'}. Results across various languages and domains show disagreement scores provide a better estimation of model performance with mean average error (MAE) as low as 0.4{\%} and on average 13.8{\%} better than using silver labels.", }
Large language models (LLMs) have demonstrated significant capability to generalize across a large number of NLP tasks. For industry applications, it is imperative to assess the performance of the LLM on unlabeled production data from time to time to validate for a real-world setting. Human labeling to assess model error requires considerable expense and time delay. Here we demonstrate that ensemble disagreement scores work well as a proxy for human labeling for language models in zero-shot, few-shot, and fine-tuned settings, per our evaluation on keyphrase extraction (KPE) task. We measure fidelity of the results by comparing to true error measured from human labeled ground truth. We contrast with the alternative of using another LLM as a source of machine labels, or {`}silver labels{'}. Results across various languages and domains show disagreement scores provide a better estimation of model performance with mean average error (MAE) as low as 0.4{\%} and on average 13.8{\%} better than using silver labels.
[ "Du, Wei", "Advani, Laksh", "Gambhir, Yashmeet", "Perry, Daniel", "Shiralkar, Prashant", "Xing, Zhengzheng", "Colak, Aaron" ]
Effective Proxy for Human Labeling: Ensemble Disagreement Scores in Large Language Models for Industrial NLP
gem-1.5
2309.05619
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.6.bib
https://aclanthology.org/2023.gem-1.6/
@inproceedings{oneil-etal-2023-automatic, title = "Automatic Reflection Generation for Peer-to-Peer Counseling", author = "O{'}neil, Emma and Sedoc, Jo{\~a}o and Yang, Diyi and Zhu, Haiyi and Ungar, Lyle", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.6", pages = "62--75", abstract = "Online peer counseling platforms enable conversations between millions of people seeking and offering mental health support. Among counseling skills, reflective listening, i.e., capturing and returning to the client something the client has said, is important for positive therapeutic outcomes. We introduce a reflection generation system for online mental health support conversations leveraging GPT-3, a large language model. We compare few-shot learning against fine-tuning and assess the impact of the quality of training examples as measured by fluency, reflection resemblance, and overall preference. Fine-tuned GPT-3 generates responses that human evaluators rate as comparable in reflection quality to responses used for tuning. Models based on high-quality responses generate substantially better reflections than ones tuned on actual responses from a large online counseling service{--}and better reflections than the actual counselor responses. These results suggest the care needed in selecting examples for tuning generative models.", }
Online peer counseling platforms enable conversations between millions of people seeking and offering mental health support. Among counseling skills, reflective listening, i.e., capturing and returning to the client something the client has said, is important for positive therapeutic outcomes. We introduce a reflection generation system for online mental health support conversations leveraging GPT-3, a large language model. We compare few-shot learning against fine-tuning and assess the impact of the quality of training examples as measured by fluency, reflection resemblance, and overall preference. Fine-tuned GPT-3 generates responses that human evaluators rate as comparable in reflection quality to responses used for tuning. Models based on high-quality responses generate substantially better reflections than ones tuned on actual responses from a large online counseling service{--}and better reflections than the actual counselor responses. These results suggest the care needed in selecting examples for tuning generative models.
[ "O{'}neil, Emma", "Sedoc, Jo{\\~a}o", "Yang, Diyi", "Zhu, Haiyi", "Ungar, Lyle" ]
Automatic Reflection Generation for Peer-to-Peer Counseling
gem-1.6
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.7.bib
https://aclanthology.org/2023.gem-1.7/
@inproceedings{harvill-etal-2023-one-shot, title = "One-Shot and Few-Shot Exemplification Modeling", author = "Harvill, John and Yoon, Hee Suk and Yoon, Eunseop and Hasegawa-Johnson, Mark and Yoo, Chang", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.7", pages = "76--87", abstract = "Exemplification modeling is a task where the goal is to produce a viable example sentence that uses a target word with a target definition. The task is non-trivial for polysemous words, and previous works have only explored settings where ample labeled training data is available. In this paper, we demonstrate that exemplification modeling can be performed without a large labeled training corpus by either changing the format of the task (one-shot) or prompting large language models (few-shot), and ablate key components of our proposed one-shot and few-shot systems. We provide extensive automatic and human evaluations of model performance and find that our proposed one-shot and few-shot approaches perform similarly to a fully supervised baseline. We compare and contrast each method in terms of labeled training dataset size, performance, and model size, and find that each technique has at least one tradeoff that another approach does not.", }
Exemplification modeling is a task where the goal is to produce a viable example sentence that uses a target word with a target definition. The task is non-trivial for polysemous words, and previous works have only explored settings where ample labeled training data is available. In this paper, we demonstrate that exemplification modeling can be performed without a large labeled training corpus by either changing the format of the task (one-shot) or prompting large language models (few-shot), and ablate key components of our proposed one-shot and few-shot systems. We provide extensive automatic and human evaluations of model performance and find that our proposed one-shot and few-shot approaches perform similarly to a fully supervised baseline. We compare and contrast each method in terms of labeled training dataset size, performance, and model size, and find that each technique has at least one tradeoff that another approach does not.
[ "Harvill, John", "Yoon, Hee Suk", "Yoon, Eunseop", "Hasegawa-Johnson, Mark", "Yoo, Chang" ]
One-Shot and Few-Shot Exemplification Modeling
gem-1.7
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.8.bib
https://aclanthology.org/2023.gem-1.8/
@inproceedings{zhou-etal-2023-leveraging, title = "Leveraging Large Language Models for Enhanced Product Descriptions in e{C}ommerce", author = "Zhou, Jianghong and Liu, Bo and Acharya, Jhalak and Hong, Yao and Lee, Kuang-Chih and Wen, Musen", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.8", pages = "88--96", abstract = "In the dynamic field of eCommerce, the quality and comprehensiveness of product descriptions are pivotal for enhancing search visibility and customer engagement. Effective product descriptions can address the {`}cold start{'} problem, align with market trends, and ultimately lead to increased click-through rates. Traditional methods for crafting these descriptions often involve significant human effort and may lack both consistency and scalability. This paper introduces a novel methodology for automating product description generation using the LLAMA 2.0 7B language model. We train the model on a dataset of authentic product descriptions from Walmart, one of the largest eCommerce platforms. The model is then fine-tuned for domain-specific language features and eCommerce nuances to enhance its utility in sales and user engagement. We employ multiple evaluation metrics{---}including NDCG, customer click-through rates, and human assessments{---}to validate the effectiveness of our approach. Our findings reveal that the system is not only scalable but also significantly reduces the human workload involved in creating product descriptions. This study underscores the considerable potential of large language models like LLAMA 2.0 7B in automating and optimizing various facets of eCommerce platforms, offering significant business impact, including improved search functionality and increased sales.", }
In the dynamic field of eCommerce, the quality and comprehensiveness of product descriptions are pivotal for enhancing search visibility and customer engagement. Effective product descriptions can address the {`}cold start{'} problem, align with market trends, and ultimately lead to increased click-through rates. Traditional methods for crafting these descriptions often involve significant human effort and may lack both consistency and scalability. This paper introduces a novel methodology for automating product description generation using the LLAMA 2.0 7B language model. We train the model on a dataset of authentic product descriptions from Walmart, one of the largest eCommerce platforms. The model is then fine-tuned for domain-specific language features and eCommerce nuances to enhance its utility in sales and user engagement. We employ multiple evaluation metrics{---}including NDCG, customer click-through rates, and human assessments{---}to validate the effectiveness of our approach. Our findings reveal that the system is not only scalable but also significantly reduces the human workload involved in creating product descriptions. This study underscores the considerable potential of large language models like LLAMA 2.0 7B in automating and optimizing various facets of eCommerce platforms, offering significant business impact, including improved search functionality and increased sales.
[ "Zhou, Jianghong", "Liu, Bo", "Acharya, Jhalak", "Hong, Yao", "Lee, Kuang-Chih", "Wen, Musen" ]
Leveraging Large Language Models for Enhanced Product Descriptions in eCommerce
gem-1.8
2310.18357
[ "" ]
https://huggingface.co/papers/2310.18357
0
0
0
5
[]
[]
[]
1
Poster
https://aclanthology.org/2023.gem-1.9.bib
https://aclanthology.org/2023.gem-1.9/
@inproceedings{amouyal-etal-2023-qampari, title = "{QAMPARI}: A Benchmark for Open-domain Questions with Many Answers", author = "Amouyal, Samuel and Wolfson, Tomer and Rubin, Ohad and Yoran, Ori and Herzig, Jonathan and Berant, Jonathan", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.9", pages = "97--110", abstract = "Existing benchmarks for open-domain question answering (ODQA) typically focus on questions whose answers are all in a single paragraph. By contrast, many natural questions, such as {``}What players were drafted by the Brooklyn Nets?{''} have a long list of answers extracted from multiple paragraphs. Answering such questions requires retrieving and reading many passages from a large corpus. We introduce QAMPARI, an ODQA benchmark, where answers are lists of entities, spread across many paragraphs. We created QAMPARI by (a) generating questions with multiple answers from Wikipedia{'}s knowledge graph and tables, (b) automatically pairing answers with supporting evidence in Wikipedia paragraphs, and (c) manually paraphrasing questions and validating each answer. Across a wide range of ODQA models, we find that QAMPARI is challenging in terms of both passage retrieval and answer generation, with models reaching an F1 score of 32.8 at best. We view QAMPARI as a valuable resource for ODQA research, which will aid to develop models that handle a broad range of question types, including single and multi-answer questions.", }
Existing benchmarks for open-domain question answering (ODQA) typically focus on questions whose answers are all in a single paragraph. By contrast, many natural questions, such as {``}What players were drafted by the Brooklyn Nets?{''} have a long list of answers extracted from multiple paragraphs. Answering such questions requires retrieving and reading many passages from a large corpus. We introduce QAMPARI, an ODQA benchmark, where answers are lists of entities, spread across many paragraphs. We created QAMPARI by (a) generating questions with multiple answers from Wikipedia{'}s knowledge graph and tables, (b) automatically pairing answers with supporting evidence in Wikipedia paragraphs, and (c) manually paraphrasing questions and validating each answer. Across a wide range of ODQA models, we find that QAMPARI is challenging in terms of both passage retrieval and answer generation, with models reaching an F1 score of 32.8 at best. We view QAMPARI as a valuable resource for ODQA research, which will aid to develop models that handle a broad range of question types, including single and multi-answer questions.
[ "Amouyal, Samuel", "Wolfson, Tomer", "Rubin, Ohad", "Yoran, Ori", "Herzig, Jonathan", "Berant, Jonathan" ]
QAMPARI: A Benchmark for Open-domain Questions with Many Answers
gem-1.9
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.10.bib
https://aclanthology.org/2023.gem-1.10/
@inproceedings{kour-etal-2023-unveiling, title = "Unveiling Safety Vulnerabilities of Large Language Models", author = "Kour, George and Zalmanovici, Marcel and Zwerdling, Naama and Goldbraich, Esther and Fandina, Ora and Anaby Tavor, Ateret and Raz, Orna and Farchi, Eitan", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.10", pages = "111--127", abstract = "As large language models become more prevalent, their possible harmful or inappropriate responses are a cause for concern. This paper introduces a unique dataset containing adversarial examples in the form of questions, we call AttaQ, designed to provoke such harmful or inappropriate responses. We assess the efficacy of our dataset by analyzing the vulnerabilities of various models when subjected to it. Additionally, we introduce a novel automatic approach for identifying and naming vulnerable semantic regions {---} input semantic areas for which the model is likely to produce harmful outputs. This is achieved through the application of specialized clustering techniques that consider both the semantic similarity of the input attacks and the harmfulness of the model{'}s responses.Automatically identifying vulnerable semantic regions enhances the evaluation of model weaknesses, facilitating targeted improvements to its safety mechanisms and overall reliability.", }
As large language models become more prevalent, their possible harmful or inappropriate responses are a cause for concern. This paper introduces a unique dataset containing adversarial examples in the form of questions, we call AttaQ, designed to provoke such harmful or inappropriate responses. We assess the efficacy of our dataset by analyzing the vulnerabilities of various models when subjected to it. Additionally, we introduce a novel automatic approach for identifying and naming vulnerable semantic regions {---} input semantic areas for which the model is likely to produce harmful outputs. This is achieved through the application of specialized clustering techniques that consider both the semantic similarity of the input attacks and the harmfulness of the model{'}s responses. Automatically identifying vulnerable semantic regions enhances the evaluation of model weaknesses, facilitating targeted improvements to its safety mechanisms and overall reliability.
[ "Kour, George", "Zalmanovici, Marcel", "Zwerdling, Naama", "Goldbraich, Esther", "F", "ina, Ora", "Anaby Tavor, Ateret", "Raz, Orna", "Farchi, Eitan" ]
Unveiling Safety Vulnerabilities of Large Language Models
gem-1.10
2311.04124
[ "" ]
https://huggingface.co/papers/2311.04124
4
6
0
8
[]
[ "ibm/AttaQ" ]
[]
1
Poster
https://aclanthology.org/2023.gem-1.11.bib
https://aclanthology.org/2023.gem-1.11/
@inproceedings{mallick-etal-2023-adapting, title = "Adapting Pre-trained Generative Models for Extractive Question Answering", author = "Mallick, Prabir and Nayak, Tapas and Bhattacharya, Indrajit", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.11", pages = "128--137", abstract = "Pre-trained Generative models such as BART, T5, etc. have gained prominence as a preferred method for text generation in various natural language processing tasks, including abstractive long-form question answering (QA) and summarization. However, the potential of generative models in extractive QA tasks, where discriminative models are commonly employed, remains largely unexplored. Discriminative models often encounter challenges associated with label sparsity, particularly when only a small portion of the context contains the answer. The challenge is more pronounced for multi-span answers. In this work, we introduce a novel approach that uses the power of pre-trained generative models to address extractive QA tasks by generating indexes corresponding to context tokens or sentences that form part of the answer. Through comprehensive evaluations on multiple extractive QA datasets, including MultiSpanQA, BioASQ, MASHQA, and WikiQA, we demonstrate the superior performance of our proposed approach compared to existing state-of-the-art models.", }
Pre-trained Generative models such as BART, T5, etc. have gained prominence as a preferred method for text generation in various natural language processing tasks, including abstractive long-form question answering (QA) and summarization. However, the potential of generative models in extractive QA tasks, where discriminative models are commonly employed, remains largely unexplored. Discriminative models often encounter challenges associated with label sparsity, particularly when only a small portion of the context contains the answer. The challenge is more pronounced for multi-span answers. In this work, we introduce a novel approach that uses the power of pre-trained generative models to address extractive QA tasks by generating indexes corresponding to context tokens or sentences that form part of the answer. Through comprehensive evaluations on multiple extractive QA datasets, including MultiSpanQA, BioASQ, MASHQA, and WikiQA, we demonstrate the superior performance of our proposed approach compared to existing state-of-the-art models.
[ "Mallick, Prabir", "Nayak, Tapas", "Bhattacharya, Indrajit" ]
Adapting Pre-trained Generative Models for Extractive Question Answering
gem-1.11
2311.02961
[ "" ]
https://huggingface.co/papers/2311.02961
0
1
0
3
[]
[]
[]
1
Poster
https://aclanthology.org/2023.gem-1.12.bib
https://aclanthology.org/2023.gem-1.12/
@inproceedings{rabinovich-etal-2023-predicting, title = "Predicting Question-Answering Performance of Large Language Models through Semantic Consistency", author = "Rabinovich, Ella and Ackerman, Samuel and Raz, Orna and Farchi, Eitan and Anaby Tavor, Ateret", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.12", pages = "138--154", abstract = "Semantic consistency of a language model is broadly defined as the model{'}s ability to produce semantically-equivalent outputs, given semantically-equivalent inputs. We address the task of assessing question-answering (QA) semantic consistency of contemporary large language models (LLMs) by manually creating a benchmark dataset with high-quality paraphrases for factual questions, and release the dataset to the community.We further combine the semantic consistency metric with additional measurements suggested in prior work as correlating with LLM QA accuracy, for building and evaluating a framework for factual QA reference-less performance prediction {--} predicting the likelihood of a language model to accurately answer a question. Evaluating the framework on five contemporary LLMs, we demonstrate encouraging, significantly outperforming baselines, results.", }
Semantic consistency of a language model is broadly defined as the model{'}s ability to produce semantically-equivalent outputs, given semantically-equivalent inputs. We address the task of assessing question-answering (QA) semantic consistency of contemporary large language models (LLMs) by manually creating a benchmark dataset with high-quality paraphrases for factual questions, and release the dataset to the community. We further combine the semantic consistency metric with additional measurements suggested in prior work as correlating with LLM QA accuracy, for building and evaluating a framework for factual QA reference-less performance prediction {--} predicting the likelihood of a language model to accurately answer a question. Evaluating the framework on five contemporary LLMs, we demonstrate encouraging, significantly outperforming baselines, results.
[ "Rabinovich, Ella", "Ackerman, Samuel", "Raz, Orna", "Farchi, Eitan", "Anaby Tavor, Ateret" ]
Predicting Question-Answering Performance of Large Language Models through Semantic Consistency
gem-1.12
2311.01152
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.13.bib
https://aclanthology.org/2023.gem-1.13/
@inproceedings{yu-etal-2023-towards, title = "Towards Effective Long-Form {QA} with Evidence Augmentation", author = "Yu, Mengxia and Rosenthal, Sara and Bornea, Mihaela and Sil, Avi", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.13", pages = "155--164", abstract = "In this study, we focus on the challenge of improving Long-form Question Answering (LFQA) by extracting and effectively utilizing knowledge from a large set of retrieved passages. We first demonstrate the importance of accurate evidence retrieval for LFQA, showing that optimal extracted knowledge from passages significantly benefits the generation. We also show that the choice of generative models impacts the system{'}s ability to leverage the evidence and produce answers that are grounded in the retrieved passages. We propose a Mixture of Experts (MoE) model as an alternative to the Fusion in Decoder (FiD) used in state-of-the-art LFQA systems and we compare these two models in our experiments.", }
In this study, we focus on the challenge of improving Long-form Question Answering (LFQA) by extracting and effectively utilizing knowledge from a large set of retrieved passages. We first demonstrate the importance of accurate evidence retrieval for LFQA, showing that optimal extracted knowledge from passages significantly benefits the generation. We also show that the choice of generative models impacts the system{'}s ability to leverage the evidence and produce answers that are grounded in the retrieved passages. We propose a Mixture of Experts (MoE) model as an alternative to the Fusion in Decoder (FiD) used in state-of-the-art LFQA systems and we compare these two models in our experiments.
[ "Yu, Mengxia", "Rosenthal, Sara", "Bornea, Mihaela", "Sil, Avi" ]
Towards Effective Long-Form QA with Evidence Augmentation
gem-1.13
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.14.bib
https://aclanthology.org/2023.gem-1.14/
@inproceedings{wang-sha-2023-harnessing, title = "Harnessing the Plug-and-Play Controller by Prompting", author = "Wang, Hao and Sha, Lei", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.14", pages = "165--174", abstract = "Controllable text generation is a growing field within natural language generation (NLG) that focuses on producing text that meets specific constraints in real-world applications. Previous approaches, such as plug-and-play controllers (PPCs), aimed to steer the properties of generated text in a flexible manner. However, these methods often compromised the integrity of the language model{'}s decoding process, resulting in less smooth text generation.Alternatively, other techniques utilized multiple attribute prompts to align the generated text with desired attributes, but this approach required prompt design for each attribute and was dependent on the size of the language model. This paper introduces a novel method for flexible attribute control in text generation using pre-trained language models (PLMs). The proposed approach aims to enhance the fluency of generated text by guiding the generation process with PPCs. The key idea is to dynamically adjust the distribution of generated text by modifying prompts, effectively constraining the output space of the language model and influencing the desired attribute. To enable smooth cooperation between the PLM and the PPC, our work innovativel proposes a new model fine-tuning method: Reinforcement Learning with Dynamic Adjust Feedback (RLDAF).This fine-tuning process adapts a small subset of the language model{'}s parameters based on the generating actions taken during the PPC control process. The resulting harmonious collaboration between the PLM and PPC leads to improved smoothness in text generation during inference. Extensive experiments were conducted on the SST2 dataset, and the proposed method outperformed previous approaches in various evaluation metrics, including text fluency and attribute consistency.", }
Controllable text generation is a growing field within natural language generation (NLG) that focuses on producing text that meets specific constraints in real-world applications. Previous approaches, such as plug-and-play controllers (PPCs), aimed to steer the properties of generated text in a flexible manner. However, these methods often compromised the integrity of the language model{'}s decoding process, resulting in less smooth text generation. Alternatively, other techniques utilized multiple attribute prompts to align the generated text with desired attributes, but this approach required prompt design for each attribute and was dependent on the size of the language model. This paper introduces a novel method for flexible attribute control in text generation using pre-trained language models (PLMs). The proposed approach aims to enhance the fluency of generated text by guiding the generation process with PPCs. The key idea is to dynamically adjust the distribution of generated text by modifying prompts, effectively constraining the output space of the language model and influencing the desired attribute. To enable smooth cooperation between the PLM and the PPC, our work innovatively proposes a new model fine-tuning method: Reinforcement Learning with Dynamic Adjust Feedback (RLDAF). This fine-tuning process adapts a small subset of the language model{'}s parameters based on the generating actions taken during the PPC control process. The resulting harmonious collaboration between the PLM and PPC leads to improved smoothness in text generation during inference. Extensive experiments were conducted on the SST2 dataset, and the proposed method outperformed previous approaches in various evaluation metrics, including text fluency and attribute consistency.
[ "Wang, Hao", "Sha, Lei" ]
Harnessing the Plug-and-Play Controller by Prompting
gem-1.14
2402.04160
[ "" ]
https://huggingface.co/papers/2402.04160
0
1
0
2
[]
[]
[]
1
Poster
https://aclanthology.org/2023.gem-1.15.bib
https://aclanthology.org/2023.gem-1.15/
@inproceedings{kwak-etal-2023-context, title = "Context and Literacy Aware Learnable Metric for Text Simplification", author = "Kwak, Jeongwon and Park, Hyeryun and Kim, Kyungmo and Choi, Jinwook", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.15", pages = "175--180", abstract = "Automatic evaluation of text simplification is important; but assessing its transformation into simpler sentences can be challenging for various reasons. However, the most commonly used metric in text simplification, SARI, fails to capture the difficulty of generating words that are not present in the references, regardless of their meaning. We propose a new learnable evaluation metric that decomposes and reconstructs sentences to simultaneously measure the similarity and difficulty of sentences within a single system. Through experiments, we confirm that it exhibited the highest similarity in correlation with the human evaluation.", }
Automatic evaluation of text simplification is important; but assessing its transformation into simpler sentences can be challenging for various reasons. However, the most commonly used metric in text simplification, SARI, fails to capture the difficulty of generating words that are not present in the references, regardless of their meaning. We propose a new learnable evaluation metric that decomposes and reconstructs sentences to simultaneously measure the similarity and difficulty of sentences within a single system. Through experiments, we confirm that it exhibited the highest similarity in correlation with the human evaluation.
[ "Kwak, Jeongwon", "Park, Hyeryun", "Kim, Kyungmo", "Choi, Jinwook" ]
Context and Literacy Aware Learnable Metric for Text Simplification
gem-1.15
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.16.bib
https://aclanthology.org/2023.gem-1.16/
@inproceedings{abdullin-etal-2023-synthetic, title = "Synthetic Dialogue Dataset Generation using {LLM} Agents", author = "Abdullin, Yelaman and Molla, Diego and Ofoghi, Bahadorreza and Yearwood, John and Li, Qingyang", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.16", pages = "181--191", abstract = "Linear programming (LP) problems are pervasive in real-life applications. However, despite their apparent simplicity, an untrained user may find it difficult to determine the linear model of their specific problem. We envisage the creation of a goal-oriented conversational agent that will engage in conversation with the user to elicit all information required so that a subsequent agent can generate the linear model. In this paper, we present an approach for the generation of sample dialogues that can be used to develop and train such a conversational agent. Using prompt engineering, we develop two agents that {``}talk{''} to each other, one acting as the conversational agent, and the other acting as the user. Using a set of text descriptions of linear problems from NL4Opt available to the user only, the agent and the user engage in conversation until the agent has retrieved all key information from the original problem description. We also propose an extrinsic evaluation of the dialogues by assessing how well the summaries generated by the dialogues match the original problem descriptions. We conduct human and automatic evaluations, including an evaluation approach that uses GPT-4 to mimic the human evaluation metrics. The evaluation results show an overall good quality of the dialogues, though research is still needed to improve the quality of the GPT-4 evaluation metrics. The resulting dialogues, including the human annotations of a subset, are available to the research community. The conversational agent used for the generation of the dialogues can be used as a baseline.", }
Linear programming (LP) problems are pervasive in real-life applications. However, despite their apparent simplicity, an untrained user may find it difficult to determine the linear model of their specific problem. We envisage the creation of a goal-oriented conversational agent that will engage in conversation with the user to elicit all information required so that a subsequent agent can generate the linear model. In this paper, we present an approach for the generation of sample dialogues that can be used to develop and train such a conversational agent. Using prompt engineering, we develop two agents that {``}talk{''} to each other, one acting as the conversational agent, and the other acting as the user. Using a set of text descriptions of linear problems from NL4Opt available to the user only, the agent and the user engage in conversation until the agent has retrieved all key information from the original problem description. We also propose an extrinsic evaluation of the dialogues by assessing how well the summaries generated by the dialogues match the original problem descriptions. We conduct human and automatic evaluations, including an evaluation approach that uses GPT-4 to mimic the human evaluation metrics. The evaluation results show an overall good quality of the dialogues, though research is still needed to improve the quality of the GPT-4 evaluation metrics. The resulting dialogues, including the human annotations of a subset, are available to the research community. The conversational agent used for the generation of the dialogues can be used as a baseline.
[ "Abdullin, Yelaman", "Molla, Diego", "Ofoghi, Bahadorreza", "Yearwood, John", "Li, Qingyang" ]
Synthetic Dialogue Dataset Generation using LLM Agents
gem-1.16
2401.17461
[ "https://github.com/eabdullin/optimouse-quest" ]
https://huggingface.co/papers/2401.17461
1
1
0
5
[]
[]
[]
1
Poster
https://aclanthology.org/2023.gem-1.17.bib
https://aclanthology.org/2023.gem-1.17/
@inproceedings{lee-etal-2023-empirical, title = "An Empirical {B}ayes Framework for Open-Domain Dialogue Generation", author = "Lee, Jing Yang and Lee, Kong Aik and Gan, Woon Seng", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.17", pages = "192--204", abstract = "To engage human users in meaningful conversation, open-domain dialogue agents are required to generate diverse and contextually coherent dialogue. Despite recent advancements, which can be attributed to the usage of pretrained language models, the generation of diverse and coherent dialogue remains an open research problem. A popular approach to address this issue involves the adaptation of variational frameworks. However, while these approaches successfully improve diversity, they tend to compromise on contextual coherence. Hence, we propose the Bayesian Open-domain Dialogue with Empirical Bayes (BODEB) framework, an empirical bayes framework for constructing an Bayesian open-domain dialogue agent by leveraging pretrained parameters to inform the prior and posterior parameter distributions. Empirical results show that BODEB achieves better results in terms of both diversity and coherence compared to variational frameworks.", }
To engage human users in meaningful conversation, open-domain dialogue agents are required to generate diverse and contextually coherent dialogue. Despite recent advancements, which can be attributed to the usage of pretrained language models, the generation of diverse and coherent dialogue remains an open research problem. A popular approach to address this issue involves the adaptation of variational frameworks. However, while these approaches successfully improve diversity, they tend to compromise on contextual coherence. Hence, we propose the Bayesian Open-domain Dialogue with Empirical Bayes (BODEB) framework, an empirical Bayes framework for constructing a Bayesian open-domain dialogue agent by leveraging pretrained parameters to inform the prior and posterior parameter distributions. Empirical results show that BODEB achieves better results in terms of both diversity and coherence compared to variational frameworks.
[ "Lee, Jing Yang", "Lee, Kong Aik", "Gan, Woon Seng" ]
An Empirical Bayes Framework for Open-Domain Dialogue Generation
gem-1.17
2311.10945
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.18.bib
https://aclanthology.org/2023.gem-1.18/
@inproceedings{imperial-tayyar-madabushi-2023-flesch, title = "Flesch or Fumble? Evaluating Readability Standard Alignment of Instruction-Tuned Language Models", author = "Imperial, Joseph Marvin and Tayyar Madabushi, Harish", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.18", pages = "205--223", abstract = "Readability metrics and standards such as Flesch Kincaid Grade Level (FKGL) and the Common European Framework of Reference for Languages (CEFR) exist to guide teachers and educators to properly assess the complexity of educational materials before administering them for classroom use. In this study, we select a diverse set of open and closed-source instruction-tuned language models and investigate their performances in writing story completions and simplifying narratives{---}tasks that teachers perform{---}using standard-guided prompts controlling text readability. Our extensive findings provide empirical proof of how globally recognized models like ChatGPT may be considered less effective and may require more refined prompts for these generative tasks compared to other open-sourced models such as BLOOMZ and FlanT5{---}which have shown promising results.", }
Readability metrics and standards such as Flesch Kincaid Grade Level (FKGL) and the Common European Framework of Reference for Languages (CEFR) exist to guide teachers and educators to properly assess the complexity of educational materials before administering them for classroom use. In this study, we select a diverse set of open and closed-source instruction-tuned language models and investigate their performances in writing story completions and simplifying narratives{---}tasks that teachers perform{---}using standard-guided prompts controlling text readability. Our extensive findings provide empirical proof of how globally recognized models like ChatGPT may be considered less effective and may require more refined prompts for these generative tasks compared to other open-sourced models such as BLOOMZ and FlanT5{---}which have shown promising results.
[ "Imperial, Joseph Marvin", "Tayyar Madabushi, Harish" ]
Flesch or Fumble? Evaluating Readability Standard Alignment of Instruction-Tuned Language Models
gem-1.18
2309.05454
[ "https://github.com/imperialite/readability-standard-alignment" ]
https://huggingface.co/papers/2309.05454
1
0
0
2
[]
[]
[]
1
Poster
https://aclanthology.org/2023.gem-1.19.bib
https://aclanthology.org/2023.gem-1.19/
@inproceedings{mcdanel-liu-2023-chatgpt, title = "{C}hat{GPT} as a {J}ava Decompiler", author = "Mcdanel, Bradley and Liu, Zhanhao", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.19", pages = "224--232", abstract = "We propose a novel approach using instruction-tuned large language models (LLMs), such as ChatGPT, to automatically decompile entire Java classes. Our method relies only on a textual representation of the Java bytecode and corresponding unit tests generated from the bytecode. While no additional domain knowledge or fine-tuning is performed, we provide a single training example of this decompilation process in the model{'}s prompt. To overcome both compilation errors and test failures, we use an iterative prompting approach. We find that ChatGPT-4 is able to generate more human-readable output than existing software-based decompilers while achieving slightly lower pass rates on unit tests. Source code and datasets are available at \url{https://github.com/BradMcDanel/gpt-java-decompiler}.", }
We propose a novel approach using instruction-tuned large language models (LLMs), such as ChatGPT, to automatically decompile entire Java classes. Our method relies only on a textual representation of the Java bytecode and corresponding unit tests generated from the bytecode. While no additional domain knowledge or fine-tuning is performed, we provide a single training example of this decompilation process in the model{'}s prompt. To overcome both compilation errors and test failures, we use an iterative prompting approach. We find that ChatGPT-4 is able to generate more human-readable output than existing software-based decompilers while achieving slightly lower pass rates on unit tests. Source code and datasets are available at \url{https://github.com/BradMcDanel/gpt-java-decompiler}.
[ "Mcdanel, Bradley", "Liu, Zhanhao" ]
ChatGPT as a Java Decompiler
gem-1.19
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.20.bib
https://aclanthology.org/2023.gem-1.20/
@inproceedings{demeter-etal-2023-multi, title = "Multi-domain Summarization from Leaderboards to Practice: Re-examining Automatic and Human Evaluation", author = "Demeter, David and Agarwal, Oshin and Ben Igeri, Simon and Sterbentz, Marko and Molino, Neil and Conroy, John and Nenkova, Ani", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.20", pages = "233--242", abstract = "Existing literature does not give much guidance on how to build the best possible multi-domain summarization model from existing components. We present an extensive evaluation of popular pre-trained models on a wide range of datasets to inform the selection of both the model and the training data for robust summarization across several domains. We find that fine-tuned BART performs better than T5 and PEGASUS, both on in-domain and out-of-domain data, regardless of the dataset used for fine-tuning. While BART has the best performance, it does vary considerably across domains. A multi-domain summarizer that works well for all domains can be built by simply fine-tuning on diverse domains. It even performs better than an in-domain summarizer, even when using fewer total training examples. While the success of such a multi-domain summarization model is clear through automatic evaluation, by conducting a human evaluation, we find that there are variations that can not be captured by any of the automatic evaluation metrics and thus not reflected in standard leaderboards. Furthermore, we find that conducting reliable human evaluation can be complex as well. Even experienced summarization researchers can be inconsistent with one another in their assessment of the quality of a summary, and also with themselves when re-annotating the same summary. The findings of our study are two-fold. First, BART fine-tuned on heterogeneous domains is a great multi-domain summarizer for practical purposes. At the same time, we need to re-examine not just automatic evaluation metrics but also human evaluation methods to responsibly measure progress in summarization.", }
Existing literature does not give much guidance on how to build the best possible multi-domain summarization model from existing components. We present an extensive evaluation of popular pre-trained models on a wide range of datasets to inform the selection of both the model and the training data for robust summarization across several domains. We find that fine-tuned BART performs better than T5 and PEGASUS, both on in-domain and out-of-domain data, regardless of the dataset used for fine-tuning. While BART has the best performance, it does vary considerably across domains. A multi-domain summarizer that works well for all domains can be built by simply fine-tuning on diverse domains. It even performs better than an in-domain summarizer, even when using fewer total training examples. While the success of such a multi-domain summarization model is clear through automatic evaluation, by conducting a human evaluation, we find that there are variations that can not be captured by any of the automatic evaluation metrics and thus not reflected in standard leaderboards. Furthermore, we find that conducting reliable human evaluation can be complex as well. Even experienced summarization researchers can be inconsistent with one another in their assessment of the quality of a summary, and also with themselves when re-annotating the same summary. The findings of our study are two-fold. First, BART fine-tuned on heterogeneous domains is a great multi-domain summarizer for practical purposes. At the same time, we need to re-examine not just automatic evaluation metrics but also human evaluation methods to responsibly measure progress in summarization.
[ "Demeter, David", "Agarwal, Oshin", "Ben Igeri, Simon", "Sterbentz, Marko", "Molino, Neil", "Conroy, John", "Nenkova, Ani" ]
Multi-domain Summarization from Leaderboards to Practice: Re-examining Automatic and Human Evaluation
gem-1.20
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.21.bib
https://aclanthology.org/2023.gem-1.21/
@inproceedings{barriere-etal-2023-targeted, title = "Targeted Image Data Augmentation Increases Basic Skills Captioning Robustness", author = "Barriere, Valentin and Del Rio, Felipe and Carvallo, Andres and Aspillaga, Carlos and Herrera-Berg, Eugenio and Buc, Cristian", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.21", pages = "243--257", abstract = "Artificial neural networks typically struggle in generalizing to out-of-context examples. One reason for this limitation is caused by having datasets that incorporate only partial information regarding the potential correlational structure of the world. In this work, we propose TIDA (Targeted Image-editing Data Augmentation), a targeted data augmentation method focused on improving models{'} human-like abilities (e.g., gender recognition) by filling the correlational structure gap using a text-to-image generative model. More specifically, TIDA identifies specific skills in captions describing images (e.g., the presence of a specific gender in the image), changes the caption (e.g., {``}woman{''} to {``}man{''}), and then uses a text-to-image model to edit the image in order to match the novel caption (e.g., uniquely changing a woman to a man while maintaining the context identical). Based on the Flickr30K benchmark, we show that, compared with the original data set, a TIDA-enhanced dataset related to gender, color, and counting abilities induces better performance in several image captioning metrics. Furthermore, on top of relying on the classical BLEU metric, we conduct a fine-grained analysis of the improvements of our models against the baseline in different ways. We compared text-to-image generative models and found different behaviors of the image captioning models in terms of encoding visual encoding and textual decoding.", }
Artificial neural networks typically struggle to generalize to out-of-context examples. One reason for this limitation is that datasets incorporate only partial information regarding the potential correlational structure of the world. In this work, we propose TIDA (Targeted Image-editing Data Augmentation), a targeted data augmentation method focused on improving models' human-like abilities (e.g., gender recognition) by filling the correlational structure gap using a text-to-image generative model. More specifically, TIDA identifies specific skills in captions describing images (e.g., the presence of a specific gender in the image), changes the caption (e.g., "woman" to "man"), and then uses a text-to-image model to edit the image so that it matches the novel caption (e.g., changing only a woman to a man while keeping the rest of the context identical). Based on the Flickr30K benchmark, we show that, compared with the original dataset, a TIDA-enhanced dataset related to gender, color, and counting abilities induces better performance on several image captioning metrics. Furthermore, beyond the classical BLEU metric, we conduct a fine-grained analysis of the improvements of our models over the baseline in different ways. We compare text-to-image generative models and find different behaviors of the image captioning models in terms of visual encoding and textual decoding.
[ "Barriere, Valentin", "Del Rio, Felipe", "Carvallo, Andres", "Aspillaga, Carlos", "Herrera-Berg, Eugenio", "Buc, Cristian" ]
Targeted Image Data Augmentation Increases Basic Skills Captioning Robustness
gem-1.21
2309.15991
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
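A hedged sketch of the caption-editing half of a TIDA-style pipeline described in the abstract above; the text-to-image editing step is out of scope here. The skill lexicons and swap rules are illustrative assumptions, not the paper's actual configuration.

```python
# Swap skill-specific words in a caption (e.g. gender terms); the edited
# caption would then be passed to a text-to-image editing model to produce
# the matching augmented image.
import re

SKILL_SWAPS = {
    "gender": {"woman": "man", "man": "woman", "girl": "boy", "boy": "girl"},
    "color":  {"red": "blue", "blue": "red", "green": "yellow", "yellow": "green"},
}

def edit_caption(caption, skill):
    """Return (edited_caption, changed) after swapping skill-specific words."""
    swaps = SKILL_SWAPS[skill]
    pattern = re.compile(r"\b(" + "|".join(map(re.escape, swaps)) + r")\b", re.IGNORECASE)
    changed = False
    def repl(m):
        nonlocal changed
        changed = True
        word = m.group(0)
        new = swaps[word.lower()]
        return new.capitalize() if word[0].isupper() else new
    return pattern.sub(repl, caption), changed

print(edit_caption("A woman in a red coat walks a dog.", "gender"))
# ('A man in a red coat walks a dog.', True)
```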
https://aclanthology.org/2023.gem-1.22.bib
https://aclanthology.org/2023.gem-1.22/
@inproceedings{ohmer-etal-2023-separating, title = "Separating form and meaning: Using self-consistency to quantify task understanding across multiple senses", author = "Ohmer, Xenia and Bruni, Elia and Hupkes, Dieuwke", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.22", pages = "258--276", abstract = "At the staggering pace with which the capabilities of large language models (LLMs) are increasing, creating future-proof evaluation sets to assess their understanding becomes more and more challenging. In this paper, we propose a novel paradigm for evaluating LLMs which leverages the idea that correct world understanding should be consistent across different (Fregean) senses of the same meaning. Accordingly, we measure understanding not in terms of correctness but by evaluating consistency across multiple senses that are generated by the model itself. We showcase our approach by instantiating a test where the different senses are different languages, hence using multilingual self-consistency as a litmus test for the model{'}s understanding and simultaneously addressing the important topic of multilingualism. Taking one of the latest versions of ChatGPT as our object of study, we evaluate multilingual consistency for two different tasks across three different languages. We show that its multilingual consistency is still lacking, and that its task and world understanding are thus not language-independent. As our approach does not require any static evaluation corpora in languages other than English, it can easily and cheaply be extended to different languages and tasks and could become an integral part of future benchmarking efforts.", }
At the staggering pace with which the capabilities of large language models (LLMs) are increasing, creating future-proof evaluation sets to assess their understanding becomes more and more challenging. In this paper, we propose a novel paradigm for evaluating LLMs which leverages the idea that correct world understanding should be consistent across different (Fregean) senses of the same meaning. Accordingly, we measure understanding not in terms of correctness but by evaluating consistency across multiple senses that are generated by the model itself. We showcase our approach by instantiating a test where the different senses are different languages, hence using multilingual self-consistency as a litmus test for the model's understanding and simultaneously addressing the important topic of multilingualism. Taking one of the latest versions of ChatGPT as our object of study, we evaluate multilingual consistency for two different tasks across three different languages. We show that its multilingual consistency is still lacking, and that its task and world understanding are thus not language-independent. As our approach does not require any static evaluation corpora in languages other than English, it can easily and cheaply be extended to different languages and tasks and could become an integral part of future benchmarking efforts.
[ "Ohmer, Xenia", "Bruni, Elia", "Hupkes, Dieuwke" ]
Separating form and meaning: Using self-consistency to quantify task understanding across multiple senses
gem-1.22
2305.11662
[ "https://github.com/xeniaohmer/multisense_consistency" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
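A minimal sketch of the consistency idea in the abstract above: instead of scoring correctness, measure how often the model's own answers agree across languages for the same item. The toy answers and the string normalisation are assumptions, not the paper's exact protocol.

```python
# Pairwise agreement rate between the model's answers in different languages.
from itertools import combinations

def pairwise_consistency(answers):
    """answers: dict mapping language code -> list of model answers (same items, same order)."""
    langs = list(answers)
    n_items = len(answers[langs[0]])
    scores = {}
    for a, b in combinations(langs, 2):
        agree = sum(
            answers[a][i].strip().lower() == answers[b][i].strip().lower()
            for i in range(n_items)
        )
        scores[(a, b)] = agree / n_items
    return scores

answers = {
    "en": ["paris", "4", "yes"],
    "de": ["paris", "5", "yes"],
    "es": ["paris", "4", "no"],
}
print(pairwise_consistency(answers))
# roughly {('en', 'de'): 0.67, ('en', 'es'): 0.67, ('de', 'es'): 0.33}
```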
https://aclanthology.org/2023.gem-1.23.bib
https://aclanthology.org/2023.gem-1.23/
@inproceedings{gatto-etal-2023-text, title = "Text Encoders Lack Knowledge: Leveraging Generative {LLM}s for Domain-Specific Semantic Textual Similarity", author = "Gatto, Joseph and Sharif, Omar and Seegmiller, Parker and Bohlman, Philip and Preum, Sarah", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.23", pages = "277--288", abstract = "Amidst the sharp rise in the evaluation of large language models (LLMs) on various tasks, we find that semantic textual similarity (STS) has been under-explored. In this study, we show that STS can be cast as a text generation problem while maintaining strong performance on multiple STS benchmarks. Additionally, we show generative LLMs significantly outperform existing encoder-based STS models when characterizing the semantic similarity between two texts with complex semantic relationships dependent on world knowledge. We validate this claim by evaluating both generative LLMs and existing encoder-based STS models on three newly-collected STS challenge sets which require world knowledge in the domains of Health, Politics, and Sports. All newly-collected data is sourced from social media content posted after May 2023 to ensure the performance of closed-source models like ChatGPT cannot be credited to memorization. Our results show that, on average, generative LLMs outperform the best encoder-only baselines by an average of 22.3{\%} on STS tasks requiring world knowledge. Our results suggest generative language models with STS-specific prompting strategies achieve state-of-the-art performance in complex, domain-specific STS tasks.", }
Amidst the sharp rise in the evaluation of large language models (LLMs) on various tasks, we find that semantic textual similarity (STS) has been under-explored. In this study, we show that STS can be cast as a text generation problem while maintaining strong performance on multiple STS benchmarks. Additionally, we show generative LLMs significantly outperform existing encoder-based STS models when characterizing the semantic similarity between two texts with complex semantic relationships dependent on world knowledge. We validate this claim by evaluating both generative LLMs and existing encoder-based STS models on three newly-collected STS challenge sets which require world knowledge in the domains of Health, Politics, and Sports. All newly-collected data is sourced from social media content posted after May 2023 to ensure the performance of closed-source models like ChatGPT cannot be credited to memorization. Our results show that generative LLMs outperform the best encoder-only baselines by an average of 22.3% on STS tasks requiring world knowledge. Our results suggest generative language models with STS-specific prompting strategies achieve state-of-the-art performance in complex, domain-specific STS tasks.
[ "Gatto, Joseph", "Sharif, Omar", "Seegmiller, Parker", "Bohlman, Philip", "Preum, Sarah" ]
Text Encoders Lack Knowledge: Leveraging Generative LLMs for Domain-Specific Semantic Textual Similarity
gem-1.23
2309.06541
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
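A hedged sketch of casting STS as text generation, as described in the abstract above: prompt a generative LLM for a 0-5 similarity score and parse the number. `call_llm` is a hypothetical stand-in for whatever model API is used, and the prompt wording is illustrative only.

```python
# Prompt-based STS scoring with a generative model; the LLM call is mocked.
import re

def call_llm(prompt: str) -> str:
    # Placeholder: in practice this would query a generative LLM.
    return "4.5"

def sts_prompt(sent_a: str, sent_b: str) -> str:
    return (
        "Rate the semantic similarity of the two sentences on a scale from 0 "
        "(unrelated) to 5 (equivalent). Answer with a single number.\n"
        f"Sentence 1: {sent_a}\nSentence 2: {sent_b}\nScore:"
    )

def llm_sts_score(sent_a: str, sent_b: str) -> float:
    reply = call_llm(sts_prompt(sent_a, sent_b))
    match = re.search(r"\d+(\.\d+)?", reply)
    return min(5.0, max(0.0, float(match.group()))) if match else 0.0

print(llm_sts_score("The senator backed the new health bill.",
                    "The lawmaker supported the healthcare legislation."))
```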
https://aclanthology.org/2023.gem-1.24.bib
https://aclanthology.org/2023.gem-1.24/
@inproceedings{sasse-etal-2023-burst, title = "To Burst or Not to Burst: Generating and Quantifying Improbable Text", author = "Sasse, Kuleen and Sarioglu Kayi, Efsun and Barham, Samuel and Staley, Edward", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.24", pages = "289--309", abstract = "While large language models (LLMs) are extremely capable at text generation, their outputs are still distinguishable from human-authored text. We explore this separation across many metrics over text, many sampling techniques, many types of text data, and across two popular LLMs, LLaMA and Vicuna. Along the way, we introduce a new metric, recoverability, to highlight differences between human and machine text; and we propose a new sampling technique, burst sampling, designed to close this gap. We find that LLaMA and Vicuna have distinct distributions under many of the metrics, and that this influences our results: Recoverability separates real from fake text better than any other metric when using LLaMA. When using Vicuna, burst sampling produces text which is distributionally closer to real text compared to other sampling techniques.", }
While large language models (LLMs) are extremely capable at text generation, their outputs are still distinguishable from human-authored text. We explore this separation across many metrics over text, many sampling techniques, many types of text data, and across two popular LLMs, LLaMA and Vicuna. Along the way, we introduce a new metric, recoverability, to highlight differences between human and machine text; and we propose a new sampling technique, burst sampling, designed to close this gap. We find that LLaMA and Vicuna have distinct distributions under many of the metrics, and that this influences our results: Recoverability separates real from fake text better than any other metric when using LLaMA. When using Vicuna, burst sampling produces text which is distributionally closer to real text compared to other sampling techniques.
[ "Sasse, Kuleen", "Sarioglu Kayi, Efsun", "Barham, Samuel", "Staley, Edward" ]
To Burst or Not to Burst: Generating and Quantifying Improbable Text
gem-1.24
2401.15476
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.25.bib
https://aclanthology.org/2023.gem-1.25/
@inproceedings{fu-etal-2023-large, title = "Are Large Language Models Reliable Judges? A Study on the Factuality Evaluation Capabilities of {LLM}s", author = "Fu, Xue-Yong and Laskar, Md Tahmid Rahman and Chen, Cheng and Tn, Shashi Bhushan", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.25", pages = "310--316", abstract = "In recent years, large language models (LLMs) have drawn significant attention due to their impressive emergent capabilities that were not observed in earlier language models. One emerging area where LLMs have been widely used in recent times is the utilization of LLMs as the evaluator of the texts generated by various generative models. In this paper, we also explore the possibility of whether LLMs are reliable in assessing the factual consistency of summaries generated by text generation models. We first propose a new approach to evaluate the factuality score using LLMs by utilizing the same LLM to perform all steps in the question-answering-based factuality scoring pipeline. Subsequently, we study the performance of various LLMs to directly score the factuality. Our evaluation is conducted in traditional benchmarks by comparing their correlation with human annotations. Contrary to expectations, our findings revealed that none of the factuality metrics showed any significant correlations (e.g., coefficient scores greater than 0.3) to human evaluations of factuality for GPT-4, PaLM-2, and Claude-2, with the only exception being GPT-3.5 in two subcategories of factuality. Nonetheless, our findings are consistent across almost all factual error types, suggesting a fundamental limitation in the ability of current LLMs to assess factuality.", }
In recent years, large language models (LLMs) have drawn significant attention due to their impressive emergent capabilities that were not observed in earlier language models. One emerging area where LLMs have been widely used in recent times is the utilization of LLMs as the evaluator of the texts generated by various generative models. In this paper, we explore whether LLMs are reliable in assessing the factual consistency of summaries generated by text generation models. We first propose a new approach to evaluate the factuality score using LLMs by utilizing the same LLM to perform all steps in the question-answering-based factuality scoring pipeline. Subsequently, we study the performance of various LLMs to directly score the factuality. Our evaluation is conducted on traditional benchmarks by comparing their correlation with human annotations. Contrary to expectations, our findings revealed that none of the factuality metrics showed any significant correlations (e.g., coefficient scores greater than 0.3) to human evaluations of factuality for GPT-4, PaLM-2, and Claude-2, with the only exception being GPT-3.5 in two subcategories of factuality. Nonetheless, our findings are consistent across almost all factual error types, suggesting a fundamental limitation in the ability of current LLMs to assess factuality.
[ "Fu, Xue-Yong", "Laskar, Md Tahmid Rahman", "Chen, Cheng", "Tn, Shashi Bhushan" ]
Are Large Language Models Reliable Judges? A Study on the Factuality Evaluation Capabilities of LLMs
gem-1.25
2311.00681
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
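A skeleton of a question-answering-based factuality score in which a single LLM plays every role (question generation, answering, answer comparison), which is the high-level pipeline the abstract above describes. `call_llm`, the prompts, and the aggregation are assumptions for illustration, not the paper's implementation.

```python
# QA-based factuality scoring with one LLM for all steps; the LLM call is a stub.
def call_llm(prompt: str) -> str:
    return ""  # placeholder for a real LLM call

def qa_factuality(source: str, summary: str, n_questions: int = 5) -> float:
    q_prompt = (f"Write {n_questions} questions answerable from this summary, "
                f"one per line:\n{summary}")
    questions = [q for q in call_llm(q_prompt).splitlines() if q.strip()]
    if not questions:
        return 0.0
    consistent = 0
    for q in questions:
        ans_src = call_llm(f"Answer using only this document:\n{source}\nQ: {q}\nA:")
        ans_sum = call_llm(f"Answer using only this summary:\n{summary}\nQ: {q}\nA:")
        verdict = call_llm(f"Do these answers agree? Reply yes or no.\n"
                           f"A1: {ans_src}\nA2: {ans_sum}")
        consistent += verdict.strip().lower().startswith("yes")
    return consistent / len(questions)
```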
https://aclanthology.org/2023.gem-1.26.bib
https://aclanthology.org/2023.gem-1.26/
@inproceedings{roy-basu-2023-rankaug, title = "{R}ank{A}ug: Augmented data ranking for text classification", author = "Roy, Tiasa and Basu, Priyam", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.26", pages = "317--323", abstract = "Research on data generation and augmentation has been focused majorly around enhancing generation models, leaving a notable gap in the exploration and refinement of methods for evaluating synthetic data. There are several text similarity metrics within the context of generated data filtering which can impact the performance of specific Natural Language Understanding (NLU) tasks, specifically focusing on intent and sentiment classification. In this study, we propose RankAug, a text-ranking approach that detects and filters out the top augmented texts in terms of being most similar in meaning with lexical and syntactical diversity. Through experiments conducted on multiple datasets, we demonstrate that the judicious selection of filtering techniques can yield a substantial improvement of up to 35{\%} in classification accuracy for under-represented classes.", }
Research on data generation and augmentation has focused mainly on enhancing generation models, leaving a notable gap in the exploration and refinement of methods for evaluating synthetic data. There are several text similarity metrics within the context of generated data filtering which can impact the performance of specific Natural Language Understanding (NLU) tasks, specifically focusing on intent and sentiment classification. In this study, we propose RankAug, a text-ranking approach that ranks augmented texts and retains the top candidates that are closest in meaning to the original while maintaining lexical and syntactic diversity. Through experiments conducted on multiple datasets, we demonstrate that the judicious selection of filtering techniques can yield a substantial improvement of up to 35% in classification accuracy for under-represented classes.
[ "Roy, Tiasa", "Basu, Priyam" ]
RankAug: Augmented data ranking for text classification
gem-1.26
2311.04535
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
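An illustrative ranking of augmented candidates in the spirit of the abstract above: keep candidates close in meaning to the source while rewarding lexical difference. The bag-of-words cosine is a stand-in for a proper semantic similarity model, and the scoring weights are assumptions, not RankAug's actual formulation.

```python
# Score augmented candidates by (semantic similarity proxy) + (lexical novelty).
from collections import Counter
from math import sqrt

def cosine(a: str, b: str) -> float:
    ca, cb = Counter(a.lower().split()), Counter(b.lower().split())
    dot = sum(ca[w] * cb[w] for w in ca)
    na = sqrt(sum(v * v for v in ca.values()))
    nb = sqrt(sum(v * v for v in cb.values()))
    return dot / (na * nb) if na and nb else 0.0

def lexical_novelty(a: str, b: str) -> float:
    ta, tb = set(a.lower().split()), set(b.lower().split())
    return 1.0 - len(ta & tb) / len(ta | tb) if ta | tb else 0.0

def rank_augmentations(source, candidates, top_k=2, alpha=0.7):
    scored = [(alpha * cosine(source, c) + (1 - alpha) * lexical_novelty(source, c), c)
              for c in candidates]
    return [c for _, c in sorted(scored, reverse=True)[:top_k]]

print(rank_augmentations(
    "i want to book a flight to paris",
    ["i want to book a flight to paris now",
     "please reserve me a plane ticket to paris",
     "the weather in paris is lovely"]))
```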
https://aclanthology.org/2023.gem-1.27.bib
https://aclanthology.org/2023.gem-1.27/
@inproceedings{caswell-etal-2023-separating, title = "Separating the Wheat from the Chaff with {BREAD}: An open-source benchmark and metrics to detect redundancy in text", author = "Caswell, Isaac and Wang, Lisa and Papadimitriou, Isabel", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.27", pages = "324--338", abstract = "Data quality is a problem that perpetually resurfaces throughout the field of NLP, regardless of task, domain, or architecture, and remains especially severe for lower-resource languages. A typical and insidious issue, affecting both training data and model output, is data that is repetitive and dominated by linguistically uninteresting boilerplate, such as price catalogs or computer-generated log files. Though this problem permeates many web-scraped corpora, there has yet to be a benchmark to test against, or a systematic study to find simple metrics that generalize across languages and agree with human judgements of data quality. In the present work, we create and release BREAD, a human-labeled benchmark on repetitive boilerplate vs. plausible linguistic content, spanning 360 languages. We release several baseline CRED (Character REDundancy) scores along with it, and evaluate their effectiveness on BREAD. We hope that the community will use this resource to develop better filtering methods, and that our reference implementations of CRED scores can become standard corpus evaluation tools, driving the development of cleaner language modeling corpora, especially in low-resource languages.", }
Data quality is a problem that perpetually resurfaces throughout the field of NLP, regardless of task, domain, or architecture, and remains especially severe for lower-resource languages. A typical and insidious issue, affecting both training data and model output, is data that is repetitive and dominated by linguistically uninteresting boilerplate, such as price catalogs or computer-generated log files. Though this problem permeates many web-scraped corpora, there has yet to be a benchmark to test against, or a systematic study to find simple metrics that generalize across languages and agree with human judgements of data quality. In the present work, we create and release BREAD, a human-labeled benchmark on repetitive boilerplate vs. plausible linguistic content, spanning 360 languages. We release several baseline CRED (Character REDundancy) scores along with it, and evaluate their effectiveness on BREAD. We hope that the community will use this resource to develop better filtering methods, and that our reference implementations of CRED scores can become standard corpus evaluation tools, driving the development of cleaner language modeling corpora, especially in low-resource languages.
[ "Caswell, Isaac", "Wang, Lisa", "Papadimitriou, Isabel" ]
Separating the Wheat from the Chaff with BREAD: An open-source benchmark and metrics to detect redundancy in text
gem-1.27
2311.06440
[ "https://github.com/toizzy/bread" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
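One simple character-redundancy signal in the spirit of the CRED scores mentioned above: highly repetitive boilerplate compresses much better than natural prose. This zlib-based ratio is an illustrative baseline, not necessarily one of the reference CRED implementations released with BREAD.

```python
# Compression-ratio redundancy score; higher means more repetitive text.
import zlib

def compression_redundancy(text: str) -> float:
    """Return 1 - compressed/original size; higher means more redundant."""
    raw = text.encode("utf-8")
    if not raw:
        return 0.0
    compressed = zlib.compress(raw, 9)
    return max(0.0, 1.0 - len(compressed) / len(raw))

boilerplate = "price: $9.99 | in stock\n" * 50
prose = ("The river wound slowly through the valley, and the travellers "
         "paused to watch the light change on the water.")
print(round(compression_redundancy(boilerplate), 2))  # close to 1.0
print(round(compression_redundancy(prose), 2))        # much lower
```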
https://aclanthology.org/2023.gem-1.28.bib
https://aclanthology.org/2023.gem-1.28/
@inproceedings{boubdir-etal-2023-elo, title = "Elo Uncovered: Robustness and Best Practices in Language Model Evaluation", author = "Boubdir, Meriem and Kim, Edward and Ermis, Beyza and Hooker, Sara and Fadaee, Marzieh", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.28", pages = "339--352", abstract = "In Natural Language Processing (NLP), the Elo rating system, well-established for ranking dynamic competitors in games like chess, has seen increasing adoption for evaluating Large Language Models (LLMs) through {``}A vs B{''} paired comparisons. However, while popular, the system{'}s suitability for assessing entities with constant skill levels, such as LLMs, remains relatively unexplored. Our study investigates the sensitivity and reproducibility of Elo scores for LLMs, integrating both synthetic and human feedback. We show that Elo ratings for LLMs stabilize with 100 or more comparison permutations. A lower K-factor is preferable for closely matched models, whereas a higher K-factor better distinguishes models with clear performance differences. We also report that transitivity (A B and B C implies A C) does not consistently hold, particularly when models demonstrate similar performance. Our empirical findings provide guidelines for more reliable LLM evaluation.", }
In Natural Language Processing (NLP), the Elo rating system, well-established for ranking dynamic competitors in games like chess, has seen increasing adoption for evaluating Large Language Models (LLMs) through "A vs B" paired comparisons. However, while popular, the system's suitability for assessing entities with constant skill levels, such as LLMs, remains relatively unexplored. Our study investigates the sensitivity and reproducibility of Elo scores for LLMs, integrating both synthetic and human feedback. We show that Elo ratings for LLMs stabilize with 100 or more comparison permutations. A lower K-factor is preferable for closely matched models, whereas a higher K-factor better distinguishes models with clear performance differences. We also report that transitivity (A ≻ B and B ≻ C implies A ≻ C) does not consistently hold, particularly when models demonstrate similar performance. Our empirical findings provide guidelines for more reliable LLM evaluation.
[ "Boubdir, Meriem", "Kim, Edward", "Ermis, Beyza", "Hooker, Sara", "Fadaee, Marzieh" ]
Elo Uncovered: Robustness and Best Practices in Language Model Evaluation
gem-1.28
2311.17295
[ "" ]
https://huggingface.co/papers/2311.17295
4
0
0
5
[]
[]
[]
1
Poster
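A minimal sketch of the standard Elo update over "A vs B" outcomes discussed in the abstract above; the toy match list is invented. Re-running with shuffled match orders (many permutations) is how rating stability of the kind discussed there can be probed.

```python
# Elo ratings from pairwise comparisons, with a configurable K-factor.
import random

def expected(ra: float, rb: float) -> float:
    return 1.0 / (1.0 + 10 ** ((rb - ra) / 400))

def run_elo(matches, k=16, start=1000.0, seed=0):
    """matches: list of (model_a, model_b, score_a) with score_a in {1, 0.5, 0}."""
    matches = matches[:]
    random.Random(seed).shuffle(matches)   # one permutation of the comparisons
    ratings = {}
    for a, b, s_a in matches:
        ra, rb = ratings.setdefault(a, start), ratings.setdefault(b, start)
        ea = expected(ra, rb)
        ratings[a] = ra + k * (s_a - ea)
        ratings[b] = rb + k * ((1 - s_a) - (1 - ea))
    return ratings

matches = [("model_x", "model_y", 1)] * 30 + [("model_x", "model_y", 0)] * 10
print(run_elo(matches, k=16))
```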
https://aclanthology.org/2023.gem-1.29.bib
https://aclanthology.org/2023.gem-1.29/
@inproceedings{lotfi-etal-2023-personalitychat, title = "{P}ersonality{C}hat: Conversation Distillation for Personalized Dialog Modeling with Facts and Traits", author = "Lotfi, Ehsan and De Bruyn, Maxime and Buhmann, Jeska and Daelemans, Walter", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.29", pages = "353--371", abstract = "The new wave of Large Language Models (LLM) has offered an efficient tool to curate sizeable conversational datasets. So far studies have mainly focused on task-oriented or generic open-domain dialogs, and have not fully explored the ability of LLMs in following complicated prompts. In this work, we focus on personalization, and employ LLMs to curate a dataset which is difficult and costly to crowd-source: PersonalityChat is a synthetic conversational dataset based upon the popular PersonaChat dataset, but conditioned on both personas and (Big-5) personality traits. Evaluating models fine-tuned on this dataset, we show that the personality trait labels can be used for trait-based personalization of generative dialogue models. We also perform a head-to-head comparison between PersonalityChat and PersonaChat, and show that training on the distilled dataset results in more fluent and coherent dialog agents in the small-model regime.", }
The new wave of Large Language Models (LLMs) has offered an efficient tool to curate sizeable conversational datasets. So far, studies have mainly focused on task-oriented or generic open-domain dialogs, and have not fully explored the ability of LLMs to follow complicated prompts. In this work, we focus on personalization, and employ LLMs to curate a dataset which is difficult and costly to crowd-source: PersonalityChat is a synthetic conversational dataset based upon the popular PersonaChat dataset, but conditioned on both personas and (Big-5) personality traits. Evaluating models fine-tuned on this dataset, we show that the personality trait labels can be used for trait-based personalization of generative dialogue models. We also perform a head-to-head comparison between PersonalityChat and PersonaChat, and show that training on the distilled dataset results in more fluent and coherent dialog agents in the small-model regime.
[ "Lotfi, Ehsan", "De Bruyn, Maxime", "Buhmann, Jeska", "Daelemans, Walter" ]
PersonalityChat: Conversation Distillation for Personalized Dialog Modeling with Facts and Traits
gem-1.29
2401.07363
[ "https://github.com/elotfi/personalitychat" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
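A sketch of conditioning a dialogue prompt on both persona facts and Big-5 trait labels, mirroring the dataset design described in the abstract above. The template and trait vocabulary are illustrative assumptions, not the released format.

```python
# Build a persona- and trait-conditioned dialogue prompt.
def build_prompt(persona_facts, traits, history):
    lines = ["Persona:"]
    lines += [f"- {fact}" for fact in persona_facts]
    lines.append("Personality traits: " + ", ".join(f"{k}={v}" for k, v in traits.items()))
    lines.append("Dialogue:")
    lines += [f"{speaker}: {utt}" for speaker, utt in history]
    lines.append("Bot:")
    return "\n".join(lines)

prompt = build_prompt(
    ["I work as a park ranger.", "I have two dogs."],
    {"openness": "high", "extraversion": "low"},
    [("User", "Any plans for the weekend?")],
)
print(prompt)
```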
https://aclanthology.org/2023.gem-1.30.bib
https://aclanthology.org/2023.gem-1.30/
@inproceedings{chanthran-etal-2023-well, title = "How well {C}hat{GPT} understand {M}alaysian {E}nglish? An Evaluation on Named Entity Recognition and Relation Extraction", author = "Chanthran, Mohanraj and Soon, Lay-Ki and Fang, Ong Huey and Selvaretnam, Bhawani", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.30", pages = "372--397", abstract = "Recently, ChatGPT has attracted a lot of interest from both researchers and the general public. While the performance of ChatGPT in Named Entity Recognition and Relation Extraction from Standard English texts is satisfactory, it remains to be seen if it can perform similarly for Malaysian English. Malaysian English is unique as it exhibits morphosyntactic and semantical adaptation from local contexts. In this study, we assess ChatGPT{'}s capability in extracting entities and relations from the Malaysian English News (MEN) dataset. We propose a three-step methodology referred to as \textbf{educate-predict-evaluate}. The performance of ChatGPT is assessed using F1-Score across 18 unique prompt settings, which were carefully engineered for a comprehensive review. From our evaluation, we found that ChatGPT does not perform well in extracting entities from Malaysian English news articles, with the highest F1-Score of 0.497. Further analysis shows that the morphosyntactic adaptation in Malaysian English caused the limitation. However, interestingly, this morphosyntactic adaptation does not impact the performance of ChatGPT for relation extraction.", }
Recently, ChatGPT has attracted a lot of interest from both researchers and the general public. While the performance of ChatGPT in Named Entity Recognition and Relation Extraction from Standard English texts is satisfactory, it remains to be seen if it can perform similarly for Malaysian English. Malaysian English is unique as it exhibits morphosyntactic and semantic adaptation from local contexts. In this study, we assess ChatGPT's capability in extracting entities and relations from the Malaysian English News (MEN) dataset. We propose a three-step methodology referred to as educate-predict-evaluate. The performance of ChatGPT is assessed using F1-Score across 18 unique prompt settings, which were carefully engineered for a comprehensive review. From our evaluation, we found that ChatGPT does not perform well in extracting entities from Malaysian English news articles, with the highest F1-Score of 0.497. Further analysis shows that the morphosyntactic adaptation in Malaysian English caused the limitation. However, interestingly, this morphosyntactic adaptation does not impact the performance of ChatGPT for relation extraction.
[ "Chanthran, Mohanraj", "Soon, Lay-Ki", "Fang, Ong Huey", "Selvaretnam, Bhawani" ]
How well ChatGPT understand Malaysian English? An Evaluation on Named Entity Recognition and Relation Extraction
gem-1.30
2311.11583
[ "https://github.com/mohanraj-nlp/chatgpt-malaysian-english" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
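A small sketch of entity-level precision/recall/F1 of the kind used to score extraction quality in the abstract above: predictions and gold annotations are compared as sets of (surface form, entity type). The example entities are invented for illustration.

```python
# Entity-level P/R/F1 over sets of (surface form, type) pairs.
def entity_f1(gold: set, pred: set):
    tp = len(gold & pred)
    precision = tp / len(pred) if pred else 0.0
    recall = tp / len(gold) if gold else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, f1

gold = {("Kuala Lumpur", "LOC"), ("Proton", "ORG"), ("Anwar Ibrahim", "PER")}
pred = {("Kuala Lumpur", "LOC"), ("Proton", "PER")}
print(entity_f1(gold, pred))  # (0.5, 0.333..., 0.4)
```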
https://aclanthology.org/2023.gem-1.31.bib
https://aclanthology.org/2023.gem-1.31/
@inproceedings{tikhonov-yamshchikov-2023-post, title = "Post {T}uring: Mapping the landscape of {LLM} Evaluation", author = "Tikhonov, Alexey and Yamshchikov, Ivan P.", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.31", pages = "398--412", abstract = "In the rapidly evolving landscape of Large Language Models (LLMs), introduction of well-defined and standardized evaluation methodologies remains a crucial challenge. This paper traces the historical trajectory of LLM evaluations, from the foundational questions posed by Alan Turing to the modern era of AI research. We categorize the evolution of LLMs into distinct periods, each characterized by its unique benchmarks and evaluation criteria. As LLMs increasingly mimic human-like behaviors, traditional evaluation proxies, such as the Turing test, have become less reliable. We emphasize the pressing need for a unified evaluation system, given the broader societal implications of these models. Through an analysis of common evaluation methodologies, we advocate for a qualitative shift in assessment approaches, underscoring the importance of standardization and objective criteria. This work serves as a call for the AI community to collaboratively address the challenges of LLM evaluation, ensuring their reliability, fairness, and societal benefit.", }
In the rapidly evolving landscape of Large Language Models (LLMs), introduction of well-defined and standardized evaluation methodologies remains a crucial challenge. This paper traces the historical trajectory of LLM evaluations, from the foundational questions posed by Alan Turing to the modern era of AI research. We categorize the evolution of LLMs into distinct periods, each characterized by its unique benchmarks and evaluation criteria. As LLMs increasingly mimic human-like behaviors, traditional evaluation proxies, such as the Turing test, have become less reliable. We emphasize the pressing need for a unified evaluation system, given the broader societal implications of these models. Through an analysis of common evaluation methodologies, we advocate for a qualitative shift in assessment approaches, underscoring the importance of standardization and objective criteria. This work serves as a call for the AI community to collaboratively address the challenges of LLM evaluation, ensuring their reliability, fairness, and societal benefit.
[ "Tikhonov, Alexey", "Yamshchikov, Ivan P." ]
Post Turing: Mapping the landscape of LLM Evaluation
gem-1.31
2311.02049
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.gem-1.32.bib
https://aclanthology.org/2023.gem-1.32/
@inproceedings{abburi-etal-2023-simple, title = "A Simple yet Efficient Ensemble Approach for {AI}-generated Text Detection", author = "Abburi, Harika and Roy, Kalyani and Suesserman, Michael and Pudota, Nirmala and Veeramani, Balaji and Bowen, Edward and Bhattacharya, Sanmitra", editor = "Gehrmann, Sebastian and Wang, Alex and Sedoc, Jo{\~a}o and Clark, Elizabeth and Dhole, Kaustubh and Chandu, Khyathi Raghavi and Santus, Enrico and Sedghamiz, Hooman", booktitle = "Proceedings of the Third Workshop on Natural Language Generation, Evaluation, and Metrics (GEM)", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.gem-1.32", pages = "413--421", abstract = "Recent Large Language Models (LLMs) have demonstrated remarkable capabilities in generating text that closely resembles human writing across wide range of styles and genres. However, such capabilities are prone to potential abuse, such as fake news generation, spam email creation, and misuse in academic assignments. Hence, it is essential to build automated approaches capable of distinguishing between artificially generated text and human-authored text. In this paper, we propose a simple yet efficient solution to this problem by ensembling predictions from multiple constituent LLMs. Compared to previous state-of-the-art approaches, which are perplexity-based or uses ensembles with a large number of LLMs, our condensed ensembling approach uses only two constituent LLMs to achieve comparable performance. Experiments conducted on four benchmark datasets for generative text classification show performance improvements in the range of 0.5 to 100{\%} compared to previous state-of-the-art approaches. We also study that the influence the training data from individual LLMs have on model performance. We found that substituting commercially-restrictive Generative Pre-trained Transformer (GPT) data with data generated from other open language models such as Falcon, Large Language Model Meta AI (LLaMA2), and Mosaic Pretrained Transformers (MPT) is a feasible alternative when developing generative text detectors. Furthermore, to demonstrate zero-shot generalization, we experimented with an English essays dataset, and results suggest that our ensembling approach can handle new data effectively.", }
Recent Large Language Models (LLMs) have demonstrated remarkable capabilities in generating text that closely resembles human writing across a wide range of styles and genres. However, such capabilities are prone to potential abuse, such as fake news generation, spam email creation, and misuse in academic assignments. Hence, it is essential to build automated approaches capable of distinguishing between artificially generated text and human-authored text. In this paper, we propose a simple yet efficient solution to this problem by ensembling predictions from multiple constituent LLMs. Compared to previous state-of-the-art approaches, which are perplexity-based or use ensembles with a large number of LLMs, our condensed ensembling approach uses only two constituent LLMs to achieve comparable performance. Experiments conducted on four benchmark datasets for generative text classification show performance improvements in the range of 0.5 to 100% compared to previous state-of-the-art approaches. We also study the influence that the training data from individual LLMs has on model performance. We found that substituting commercially-restrictive Generative Pre-trained Transformer (GPT) data with data generated from other open language models such as Falcon, Large Language Model Meta AI (LLaMA2), and Mosaic Pretrained Transformers (MPT) is a feasible alternative when developing generative text detectors. Furthermore, to demonstrate zero-shot generalization, we experimented with an English essays dataset, and results suggest that our ensembling approach can handle new data effectively.
[ "Abburi, Harika", "Roy, Kalyani", "Suesserman, Michael", "Pudota, Nirmala", "Veeramani, Balaji", "Bowen, Edward", "Bhattacharya, Sanmitra" ]
A Simple yet Efficient Ensemble Approach for AI-generated Text Detection
gem-1.32
2311.03084
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
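A minimal sketch of the two-model ensemble idea from the abstract above: average the constituent detectors' probabilities and threshold. The constituent scoring functions are dummies standing in for fine-tuned classifiers, and the averaging rule is an assumption.

```python
# Two-detector ensemble for machine-generated-text detection; detectors are stubs.
def detector_a(text: str) -> float:
    return 0.8   # placeholder probability that `text` is machine-generated

def detector_b(text: str) -> float:
    return 0.6   # placeholder for the second constituent model

def ensemble_predict(text: str, threshold: float = 0.5):
    score = (detector_a(text) + detector_b(text)) / 2
    return score, "machine" if score >= threshold else "human"

print(ensemble_predict("Sample passage to classify."))  # (0.7, 'machine')
```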
https://aclanthology.org/2023.genbench-1.1.bib
https://aclanthology.org/2023.genbench-1.1/
@inproceedings{saini-etal-2023-90, title = "90{\%} F1 Score in Relation Triple Extraction: Is it Real?", author = "Saini, Pratik and Pal, Samiran and Nayak, Tapas and Bhattacharya, Indrajit", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.1", doi = "10.18653/v1/2023.genbench-1.1", pages = "1--11", abstract = "Extracting relational triples from text is a crucial task for constructing knowledge bases. Recent advancements in joint entity and relation extraction models have demonstrated remarkable F1 scores ({\mbox{$\geq$}} 90{\%}) in accurately extracting relational triples from free text. However, these models have been evaluated under restrictive experimental settings and unrealistic datasets. They overlook sentences with zero triples (zerocardinality), thereby simplifying the task. In this paper, we present a benchmark study of state-of-the-art joint entity and relation extraction models under a more realistic setting. We include sentences that lack any triples in our experiments, providing a comprehensive evaluation. Our findings reveal a significant decline (approximately 10-15{\%} in one dataset and 6-14{\%} in another dataset) in the models{'} F1 scores within this realistic experimental setup. Furthermore, we propose a two-step modeling approach that utilizes a simple BERT-based classifier. This approach leads to overall performance improvement in these models within the realistic experimental setting.", }
Extracting relational triples from text is a crucial task for constructing knowledge bases. Recent advancements in joint entity and relation extraction models have demonstrated remarkable F1 scores (≥ 90%) in accurately extracting relational triples from free text. However, these models have been evaluated under restrictive experimental settings and unrealistic datasets. They overlook sentences with zero triples (zero-cardinality), thereby simplifying the task. In this paper, we present a benchmark study of state-of-the-art joint entity and relation extraction models under a more realistic setting. We include sentences that lack any triples in our experiments, providing a comprehensive evaluation. Our findings reveal a significant decline (approximately 10-15% in one dataset and 6-14% in another dataset) in the models' F1 scores within this realistic experimental setup. Furthermore, we propose a two-step modeling approach that utilizes a simple BERT-based classifier. This approach leads to overall performance improvement in these models within the realistic experimental setting.
[ "Saini, Pratik", "Pal, Samiran", "Nayak, Tapas", "Bhattacharya, Indrajit" ]
90% F1 Score in Relation Triple Extraction: Is it Real?
genbench-1.1
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
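A sketch of triple-level micro P/R/F1 over a corpus that includes zero-cardinality sentences, as the evaluation setting in the abstract above requires: spurious extractions on triple-free sentences count as false positives instead of being ignored. The example triples are invented.

```python
# Micro-averaged P/R/F1 for relation triple extraction over all sentences,
# including sentences with no gold triples.
def micro_prf(gold_per_sent, pred_per_sent):
    tp = fp = fn = 0
    for gold, pred in zip(gold_per_sent, pred_per_sent):
        gold, pred = set(gold), set(pred)
        tp += len(gold & pred)
        fp += len(pred - gold)
        fn += len(gold - pred)
    p = tp / (tp + fp) if tp + fp else 0.0
    r = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * p * r / (p + r) if p + r else 0.0
    return p, r, f1

gold = [[("Marie Curie", "born_in", "Warsaw")], [], []]   # two zero-cardinality sentences
pred = [[("Marie Curie", "born_in", "Warsaw")], [("Paris", "capital_of", "France")], []]
print(micro_prf(gold, pred))  # precision drops because of the hallucinated triple
```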
https://aclanthology.org/2023.genbench-1.2.bib
https://aclanthology.org/2023.genbench-1.2/
@inproceedings{diera-etal-2023-gencodesearchnet, title = "{G}en{C}ode{S}earch{N}et: A Benchmark Test Suite for Evaluating Generalization in Programming Language Understanding", author = "Diera, Andor and Dahou, Abdelhalim and Galke, Lukas and Karl, Fabian and Sihler, Florian and Scherp, Ansgar", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.2", doi = "10.18653/v1/2023.genbench-1.2", pages = "12--24", abstract = "Language models can serve as a valuable tool for software developers to increase productivity. Large generative models can be used for code generation and code completion, while smaller encoder-only models are capable of performing code search tasks using natural language queries. These capabilities are heavily influenced by the quality and diversity of the available training data. Source code datasets used for training usually focus on the most popular languages and testing is mostly conducted on the same distributions, often overlooking low-resource programming languages. Motivated by the NLP generalization taxonomy proposed by Hupkes et.,al., we propose a new benchmark dataset called GenCodeSearchNet (GeCS) which builds upon existing natural language code search datasets to systemically evaluate the programming language understanding generalization capabilities of language models. As part of the full dataset, we introduce a new, manually curated subset StatCodeSearch that focuses on R, a popular but so far underrepresented programming language that is often used by researchers outside the field of computer science. For evaluation and comparison, we collect several baseline results using fine-tuned BERT-style models and GPT-style large language models in a zero-shot setting.", }
Language models can serve as a valuable tool for software developers to increase productivity. Large generative models can be used for code generation and code completion, while smaller encoder-only models are capable of performing code search tasks using natural language queries. These capabilities are heavily influenced by the quality and diversity of the available training data. Source code datasets used for training usually focus on the most popular languages and testing is mostly conducted on the same distributions, often overlooking low-resource programming languages. Motivated by the NLP generalization taxonomy proposed by Hupkes et al., we propose a new benchmark dataset called GenCodeSearchNet (GeCS) which builds upon existing natural language code search datasets to systematically evaluate the programming language understanding generalization capabilities of language models. As part of the full dataset, we introduce a new, manually curated subset StatCodeSearch that focuses on R, a popular but so far underrepresented programming language that is often used by researchers outside the field of computer science. For evaluation and comparison, we collect several baseline results using fine-tuned BERT-style models and GPT-style large language models in a zero-shot setting.
[ "Diera, Andor", "Dahou, Abdelhalim", "Galke, Lukas", "Karl, Fabian", "Sihler, Florian", "Scherp, Ansgar" ]
GenCodeSearchNet: A Benchmark Test Suite for Evaluating Generalization in Programming Language Understanding
genbench-1.2
2311.09707
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.3.bib
https://aclanthology.org/2023.genbench-1.3/
@inproceedings{arora-etal-2023-adapt, title = "Adapt and Decompose: Efficient Generalization of Text-to-{SQL} via Domain Adapted Least-To-Most Prompting", author = "Arora, Aseem and Bhaisaheb, Shabbirhussain and Nigam, Harshit and Patwardhan, Manasi and Vig, Lovekesh and Shroff, Gautam", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.3", doi = "10.18653/v1/2023.genbench-1.3", pages = "25--47", abstract = "Cross-domain and cross-compositional generalization of Text-to-SQL semantic parsing is a challenging task. Existing Large Language Model (LLM) based solutions rely on inference-time retrieval of few-shot exemplars from the training set to synthesize a run-time prompt for each Natural Language (NL) test query. In contrast, we devise an algorithm which performs offline sampling of a minimal set-of few-shots from the training data, with complete coverage of SQL clauses, operators and functions, and maximal domain coverage within the allowed token length. This allows for synthesis of a fixed Generic Prompt (GP), with a diverse set-of exemplars common across NL test queries, avoiding expensive test time exemplar retrieval. We further auto-adapt the GP to the target database domain (DA-GP), to better handle cross-domain generalization; followed by a decomposed Least-To-Most-Prompting (LTMP-DA-GP) to handle cross-compositional generalization. The synthesis of LTMP-DA-GP is an offline task, to be performed one-time per new database with minimal human intervention. Our approach demonstrates superior performance on the KaggleDBQA dataset, designed to evaluate generalizability for the Text-to-SQL task. We further showcase consistent performance improvement of LTMP-DA-GP over GP, across LLMs and databases of KaggleDBQA, highlighting the efficacy and model agnostic benefits of our prompt based adapt and decompose approach.", }
Cross-domain and cross-compositional generalization of Text-to-SQL semantic parsing is a challenging task. Existing Large Language Model (LLM) based solutions rely on inference-time retrieval of few-shot exemplars from the training set to synthesize a run-time prompt for each Natural Language (NL) test query. In contrast, we devise an algorithm which performs offline sampling of a minimal set of few-shots from the training data, with complete coverage of SQL clauses, operators and functions, and maximal domain coverage within the allowed token length. This allows for synthesis of a fixed Generic Prompt (GP), with a diverse set of exemplars common across NL test queries, avoiding expensive test time exemplar retrieval. We further auto-adapt the GP to the target database domain (DA-GP), to better handle cross-domain generalization; followed by a decomposed Least-To-Most-Prompting (LTMP-DA-GP) to handle cross-compositional generalization. The synthesis of LTMP-DA-GP is an offline task, to be performed one-time per new database with minimal human intervention. Our approach demonstrates superior performance on the KaggleDBQA dataset, designed to evaluate generalizability for the Text-to-SQL task. We further showcase consistent performance improvement of LTMP-DA-GP over GP, across LLMs and databases of KaggleDBQA, highlighting the efficacy and model agnostic benefits of our prompt based adapt and decompose approach.
[ "Arora, Aseem", "Bhaisaheb, Shabbirhussain", "Nigam, Harshit", "Patwardhan, Manasi", "Vig, Lovekesh", "Shroff, Gautam" ]
Adapt and Decompose: Efficient Generalization of Text-to-SQL via Domain Adapted Least-To-Most Prompting
genbench-1.3
2308.02582
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
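A greedy sketch of the offline exemplar-selection idea from the abstract above: pick few-shot examples that together cover as many SQL clauses and operators as possible within a token budget. The keyword list, the crude token counting, and the toy pool are simplifications, not the paper's algorithm.

```python
# Greedy coverage-based selection of few-shot (question, SQL) exemplars.
SQL_KEYWORDS = {"SELECT", "WHERE", "GROUP BY", "ORDER BY", "JOIN", "LIMIT", "COUNT", "AVG"}

def covered(sql: str) -> set:
    return {kw for kw in SQL_KEYWORDS if kw in sql.upper()}

def select_exemplars(pool, max_tokens: int):
    """pool: list of (nl_question, sql). Greedy set cover under a rough token budget."""
    chosen, coverage, used = [], set(), 0
    while True:
        best = max(
            (ex for ex in pool if ex not in chosen),
            key=lambda ex: len(covered(ex[1]) - coverage),
            default=None,
        )
        if best is None or not (covered(best[1]) - coverage):
            break
        cost = len((best[0] + " " + best[1]).split())   # crude token count
        if used + cost > max_tokens:
            break
        chosen.append(best)
        coverage |= covered(best[1])
        used += cost
    return chosen, coverage

pool = [
    ("how many users are there", "SELECT COUNT(*) FROM users"),
    ("top 5 cities by population", "SELECT city FROM places ORDER BY population DESC LIMIT 5"),
    ("average salary per team", "SELECT team, AVG(salary) FROM staff GROUP BY team"),
]
print(select_exemplars(pool, max_tokens=60))
```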
https://aclanthology.org/2023.genbench-1.4.bib
https://aclanthology.org/2023.genbench-1.4/
@inproceedings{javier-vazquez-martinez-etal-2023-evaluating, title = "Evaluating Neural Language Models as Cognitive Models of Language Acquisition", author = "V{\'a}zquez Mart{\'\i}nez, H{\'e}ctor and Lea Heuser, Annika and Yang, Charles and Kodner, Jordan", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.4", doi = "10.18653/v1/2023.genbench-1.4", pages = "48--64", abstract = "The success of neural language models (LMs) on many technological tasks has brought about their potential relevance as scientific theories of language despite some clear differences between LM training and child language acquisition. In this paper we argue that some of the most prominent benchmarks for evaluating the syntactic capacities of LMs may not be sufficiently rigorous. In particular, we show that the template-based benchmarks lack the structural diversity commonly found in the theoretical and psychological studies of language. When trained on small-scale data modeling child language acquisition, the LMs can be readily matched by simple baseline models. We advocate for the use of the readily available, carefully curated datasets that have been evaluated for gradient acceptability by large pools of native speakers and are designed to probe the structural basis of grammar specifically. On one such dataset, the LI-Adger dataset, LMs evaluate sentences in a way inconsistent with human language users. We conclude with suggestions for better connecting LMs with the empirical study of child language acquisition.", }
The success of neural language models (LMs) on many technological tasks has brought about their potential relevance as scientific theories of language despite some clear differences between LM training and child language acquisition. In this paper we argue that some of the most prominent benchmarks for evaluating the syntactic capacities of LMs may not be sufficiently rigorous. In particular, we show that the template-based benchmarks lack the structural diversity commonly found in the theoretical and psychological studies of language. When trained on small-scale data modeling child language acquisition, the LMs can be readily matched by simple baseline models. We advocate for the use of the readily available, carefully curated datasets that have been evaluated for gradient acceptability by large pools of native speakers and are designed to probe the structural basis of grammar specifically. On one such dataset, the LI-Adger dataset, LMs evaluate sentences in a way inconsistent with human language users. We conclude with suggestions for better connecting LMs with the empirical study of child language acquisition.
[ "V{\\'a}zquez Mart{\\'\\i}nez, H{\\'e}ctor", "Lea Heuser, Annika", "Yang, Charles", "Kodner, Jordan" ]
Evaluating Neural Language Models as Cognitive Models of Language Acquisition
genbench-1.4
2310.20093
[ "https://github.com/hjvm/benchmarking_acquisition" ]
https://huggingface.co/papers/2310.20093
0
0
0
4
[]
[]
[]
1
Poster
https://aclanthology.org/2023.genbench-1.5.bib
https://aclanthology.org/2023.genbench-1.5/
@inproceedings{mondal-etal-2023-robust, title = "Robust Code Summarization", author = "Mondal, Debanjan and Lodha, Abhilasha and Sahoo, Ankita and Kumari, Beena", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.5", doi = "10.18653/v1/2023.genbench-1.5", pages = "65--75", abstract = "This paper delves into the intricacies of code summarization using advanced transformer-based language models. Through empirical studies, we evaluate the efficacy of code summarization by altering function and variable names to explore whether models truly understand code semantics or merely rely on textual cues. We have also introduced adversaries like dead code and commented code across three programming languages (Python, Javascript, and Java) to further scrutinize the model{'}s understanding. Ultimately, our research aims to offer valuable insights into the inner workings of transformer-based LMs, enhancing their ability to understand code and contributing to more efficient software development practices and maintenance workflows.", }
This paper delves into the intricacies of code summarization using advanced transformer-based language models. Through empirical studies, we evaluate the efficacy of code summarization by altering function and variable names to explore whether models truly understand code semantics or merely rely on textual cues. We have also introduced adversaries like dead code and commented code across three programming languages (Python, Javascript, and Java) to further scrutinize the model's understanding. Ultimately, our research aims to offer valuable insights into the inner workings of transformer-based LMs, enhancing their ability to understand code and contributing to more efficient software development practices and maintenance workflows.
[ "Mondal, Debanjan", "Lodha, Abhilasha", "Sahoo, Ankita", "Kumari, Beena" ]
Robust Code Summarization
genbench-1.5
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
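A simple perturbation of the kind described in the abstract above: rename identifiers in a code snippet to semantically meaningless ones before summarization, to test whether a model relies on names rather than structure. This regex-based renaming is a rough illustration and would not handle every language construct.

```python
# Rename a list of identifiers in a code snippet to opaque placeholders.
import re

def rename_identifiers(code: str, names):
    mapping = {name: f"var_{i}" for i, name in enumerate(names)}
    for old, new in mapping.items():
        code = re.sub(rf"\b{re.escape(old)}\b", new, code)
    return code, mapping

snippet = "def total_price(items):\n    return sum(item.price for item in items)"
print(rename_identifiers(snippet, ["total_price", "items", "item"])[0])
# def var_0(var_1):
#     return sum(var_2.price for var_2 in var_1)
```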
https://aclanthology.org/2023.genbench-1.6.bib
https://aclanthology.org/2023.genbench-1.6/
@inproceedings{stepanova-ross-2023-temporal, title = "Temporal Generalizability in Multimodal Misinformation Detection", author = {Stepanova, Nataliya and Ross, Bj{\"o}rn}, editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.6", doi = "10.18653/v1/2023.genbench-1.6", pages = "76--88", abstract = "Misinformation detection models degrade in performance over time, but the precise causes of this remain under-researched, in particular for multimodal models. We present experiments investigating the impact of temporal shift on performance of multimodal automatic misinformation detection classifiers. Working with the r/Fakeddit dataset, we found that evaluating models on temporally out-of-domain data (i.e. data from time stretches unseen in training) results in a non-linear, 7-8{\%} drop in macro F1 as compared to traditional evaluation strategies (which do not control for the effect of content change over time). Focusing on two factors that make temporal generalizability in misinformation detection difficult, content shift and class distribution shift, we found that content shift has a stronger effect on recall. Within the context of coarse-grained vs. fine-grained misinformation detection with r/Fakeddit, we find that certain misinformation classes seem to be more stable with respect to content shift (e.g. Manipulated and Misleading Content). Our results indicate that future research efforts need to explicitly account for the temporal nature of misinformation to ensure that experiments reflect expected real-world performance.", }
Misinformation detection models degrade in performance over time, but the precise causes of this remain under-researched, in particular for multimodal models. We present experiments investigating the impact of temporal shift on performance of multimodal automatic misinformation detection classifiers. Working with the r/Fakeddit dataset, we found that evaluating models on temporally out-of-domain data (i.e. data from time stretches unseen in training) results in a non-linear, 7-8% drop in macro F1 as compared to traditional evaluation strategies (which do not control for the effect of content change over time). Focusing on two factors that make temporal generalizability in misinformation detection difficult, content shift and class distribution shift, we found that content shift has a stronger effect on recall. Within the context of coarse-grained vs. fine-grained misinformation detection with r/Fakeddit, we find that certain misinformation classes seem to be more stable with respect to content shift (e.g. Manipulated and Misleading Content). Our results indicate that future research efforts need to explicitly account for the temporal nature of misinformation to ensure that experiments reflect expected real-world performance.
[ "Stepanova, Nataliya", "Ross, Bj{\\\"o}rn" ]
Temporal Generalizability in Multimodal Misinformation Detection
genbench-1.6
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.7.bib
https://aclanthology.org/2023.genbench-1.7/
@inproceedings{ginn-palmer-2023-robust, title = "Robust Generalization Strategies for Morpheme Glossing in an Endangered Language Documentation Context", author = "Ginn, Michael and Palmer, Alexis", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.7", doi = "10.18653/v1/2023.genbench-1.7", pages = "89--98", abstract = "Generalization is of particular importance in resource-constrained settings, where the available training data may represent only a small fraction of the distribution of possible texts. We investigate the ability of morpheme labeling models to generalize by evaluating their performance on unseen genres of text, and we experiment with strategies for closing the gap between performance on in-distribution and out-of-distribution data. Specifically, we use weight decay optimization, output denoising, and iterative pseudo-labeling, and achieve a 2{\%} improvement on a test set containing texts from unseen genres. All experiments are performed using texts written in the Mayan language Uspanteko.", }
Generalization is of particular importance in resource-constrained settings, where the available training data may represent only a small fraction of the distribution of possible texts. We investigate the ability of morpheme labeling models to generalize by evaluating their performance on unseen genres of text, and we experiment with strategies for closing the gap between performance on in-distribution and out-of-distribution data. Specifically, we use weight decay optimization, output denoising, and iterative pseudo-labeling, and achieve a 2% improvement on a test set containing texts from unseen genres. All experiments are performed using texts written in the Mayan language Uspanteko.
[ "Ginn, Michael", "Palmer, Alexis" ]
Robust Generalization Strategies for Morpheme Glossing in an Endangered Language Documentation Context
genbench-1.7
2311.02777
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.8.bib
https://aclanthology.org/2023.genbench-1.8/
@inproceedings{hung-etal-2023-walking, title = "Walking a Tightrope {--} Evaluating Large Language Models in High-Risk Domains", author = "Hung, Chia-Chien and Ben Rim, Wiem and Frost, Lindsay and Bruckner, Lars and Lawrence, Carolin", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.8", doi = "10.18653/v1/2023.genbench-1.8", pages = "99--111", abstract = "High-risk domains pose unique challenges that require language models to provide accurate and safe responses. Despite the great success of large language models (LLMs), such as ChatGPT and its variants, their performance in high-risk domains remains unclear. Our study delves into an in-depth analysis of the performance of instruction-tuned LLMs, focusing on factual accuracy and safety adherence. To comprehensively assess the capabilities of LLMs, we conduct experiments on six NLP datasets including question answering and summarization tasks within two high-risk domains: legal and medical. Further qualitative analysis highlights the existing limitations inherent in current LLMs when evaluating in high-risk domains. This underscores the essential nature of not only improving LLM capabilities but also prioritizing the refinement of domain-specific metrics, and embracing a more human-centric approach to enhance safety and factual reliability. Our findings advance the field toward the concerns of properly evaluating LLMs in high-risk domains, aiming to steer the adaptability of LLMs in fulfilling societal obligations and aligning with forthcoming regulations, such as the EU AI Act.", }
High-risk domains pose unique challenges that require language models to provide accurate and safe responses. Despite the great success of large language models (LLMs), such as ChatGPT and its variants, their performance in high-risk domains remains unclear. Our study delves into an in-depth analysis of the performance of instruction-tuned LLMs, focusing on factual accuracy and safety adherence. To comprehensively assess the capabilities of LLMs, we conduct experiments on six NLP datasets including question answering and summarization tasks within two high-risk domains: legal and medical. Further qualitative analysis highlights the existing limitations inherent in current LLMs when evaluating in high-risk domains. This underscores the essential nature of not only improving LLM capabilities but also prioritizing the refinement of domain-specific metrics, and embracing a more human-centric approach to enhance safety and factual reliability. Our findings advance the field toward the concerns of properly evaluating LLMs in high-risk domains, aiming to steer the adaptability of LLMs in fulfilling societal obligations and aligning with forthcoming regulations, such as the EU AI Act.
[ "Hung, Chia-Chien", "Ben Rim, Wiem", "Frost, Lindsay", "Bruckner, Lars", "Lawrence, Carolin" ]
Walking a Tightrope – Evaluating Large Language Models in High-Risk Domains
genbench-1.8
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.9.bib
https://aclanthology.org/2023.genbench-1.9/
@inproceedings{zufle-etal-2023-latent, title = "Latent Feature-based Data Splits to Improve Generalisation Evaluation: A Hate Speech Detection Case Study", author = {Z{\"u}fle, Maike and Dankers, Verna and Titov, Ivan}, editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.9", doi = "10.18653/v1/2023.genbench-1.9", pages = "112--129", abstract = "With the ever-growing presence of social media platforms comes the increased spread of harmful content and the need for robust hate speech detection systems. Such systems easily overfit to specific targets and keywords, and evaluating them without considering distribution shifts that might occur between train and test data overestimates their benefit. We challenge hate speech models via new train-test splits of existing datasets that rely on the clustering of models{'} hidden representations. We present two split variants (Subset-Sum-Split and Closest-Split) that, when applied to two datasets using four pretrained models, reveal how models catastrophically fail on blind spots in the latent space. This result generalises when developing a split with one model and evaluating it on another. Our analysis suggests that there is no clear surface-level property of the data split that correlates with the decreased performance, which underscores that task difficulty is not always humanly interpretable. We recommend incorporating latent feature-based splits in model development and release two splits via the GenBench benchmark.", }
With the ever-growing presence of social media platforms comes the increased spread of harmful content and the need for robust hate speech detection systems. Such systems easily overfit to specific targets and keywords, and evaluating them without considering distribution shifts that might occur between train and test data overestimates their benefit. We challenge hate speech models via new train-test splits of existing datasets that rely on the clustering of models' hidden representations. We present two split variants (Subset-Sum-Split and Closest-Split) that, when applied to two datasets using four pretrained models, reveal how models catastrophically fail on blind spots in the latent space. This result generalises when developing a split with one model and evaluating it on another. Our analysis suggests that there is no clear surface-level property of the data split that correlates with the decreased performance, which underscores that task difficulty is not always humanly interpretable. We recommend incorporating latent feature-based splits in model development and release two splits via the GenBench benchmark.
[ "Z{\\\"u}fle, Maike", "Dankers, Verna", "Titov, Ivan" ]
Latent Feature-based Data Splits to Improve Generalisation Evaluation: A Hate Speech Detection Case Study
genbench-1.9
2311.10236
[ "https://github.com/maikezuefle/latent-feature-splits" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.10.bib
https://aclanthology.org/2023.genbench-1.10/
@inproceedings{kamali-kordjamshidi-2023-syntax, title = "Syntax-Guided Transformers: Elevating Compositional Generalization and Grounding in Multimodal Environments", author = "Kamali, Danial and Kordjamshidi, Parisa", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.10", doi = "10.18653/v1/2023.genbench-1.10", pages = "130--142", abstract = "Compositional generalization, the ability of intelligent models to extrapolate understanding of components to novel compositions, is a fundamental yet challenging facet in AI research, especially within multimodal environments. In this work, we address this challenge by exploiting the syntactic structure of language to boost compositional generalization. This paper elevates the importance of syntactic grounding, particularly through attention masking techniques derived from text input parsing. We introduce and evaluate the merits of using syntactic information in the multimodal grounding problem. Our results on grounded compositional generalization underscore the positive impact of dependency parsing across diverse tasks when utilized with Weight Sharing across the Transformer encoder. The results push the state-of-the-art in multimodal grounding and parameter-efficient modeling and provide insights for future research.", }
Compositional generalization, the ability of intelligent models to extrapolate understanding of components to novel compositions, is a fundamental yet challenging facet in AI research, especially within multimodal environments. In this work, we address this challenge by exploiting the syntactic structure of language to boost compositional generalization. This paper elevates the importance of syntactic grounding, particularly through attention masking techniques derived from text input parsing. We introduce and evaluate the merits of using syntactic information in the multimodal grounding problem. Our results on grounded compositional generalization underscore the positive impact of dependency parsing across diverse tasks when utilized with Weight Sharing across the Transformer encoder. The results push the state-of-the-art in multimodal grounding and parameter-efficient modeling and provide insights for future research.
[ "Kamali, Danial", "Kordjamshidi, Parisa" ]
Syntax-Guided Transformers: Elevating Compositional Generalization and Grounding in Multimodal Environments
genbench-1.10
2311.04364
[ "" ]
https://huggingface.co/papers/2311.04364
1
0
0
2
[]
[]
[]
1
Poster
https://aclanthology.org/2023.genbench-1.11.bib
https://aclanthology.org/2023.genbench-1.11/
@inproceedings{reymond-steinert-threlkeld-2023-mscan, title = "m{SCAN}: A Dataset for Multilingual Compositional Generalisation Evaluation", author = "Reymond, Am{\'e}lie and Steinert-Threlkeld, Shane", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.11", doi = "10.18653/v1/2023.genbench-1.11", pages = "143--151", abstract = "Language models achieve remarkable results on a variety of tasks, yet still struggle on compositional generalisation benchmarks. The majority of these benchmarks evaluate performance in English only, leaving us with the question of whether these results generalise to other languages. As an initial step to answering this question, we introduce mSCAN, a multilingual adaptation of the SCAN dataset. It was produced by a rule-based translation, developed in cooperation with native speakers. We then showcase this novel dataset on some in-context learning experiments, and GPT3.5 and the multilingual large language model BLOOM", }
Language models achieve remarkable results on a variety of tasks, yet still struggle on compositional generalisation benchmarks. The majority of these benchmarks evaluate performance in English only, leaving us with the question of whether these results generalise to other languages. As an initial step to answering this question, we introduce mSCAN, a multilingual adaptation of the SCAN dataset. It was produced by a rule-based translation, developed in cooperation with native speakers. We then showcase this novel dataset on some in-context learning experiments with GPT3.5 and the multilingual large language model BLOOM.
[ "Reymond, Am{\\'e}lie", "Steinert-Threlkeld, Shane" ]
mSCAN: A Dataset for Multilingual Compositional Generalisation Evaluation
genbench-1.11
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.12.bib
https://aclanthology.org/2023.genbench-1.12/
@inproceedings{wilson-frank-2023-inductive, title = "Inductive Bias Is in the Eye of the Beholder", author = "Wilson, Michael and Frank, Robert", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.12", doi = "10.18653/v1/2023.genbench-1.12", pages = "152--162", abstract = "Due to the finite nature of any evidence used in learning, systematic generalization is crucially reliant on the presence of inductive bias (Mitchell, 1980). We examine inductive biases in different types of sequence-to-sequence neural network models, including CNNs, LSTMs (with and without attention), and transformers, inspired by Kharitonov and Chaabouni (2021). Crucially, however, we consider a wider range of possible inductive biases than their study did. Investigating preferences for hierarchical generalization compared to other types of generalization, we find that, contrary to their results, transformers display no preference for hierarchical generalization, but instead prefer a counting strategy. We also investigate biases toward different types of compositionality. By controlling for a confound in Kharitonov and Chaabouni (2021){'}s test set, we find much less consistent generalization overall, and find that a large number of responses were among types other than the two types of generalization they had considered. Nevertheless, we observe consistent compositional generalization to held out combinations of primitives and functions on a SCAN task (Lake and Baroni, 2017) by models of all types, but only when primitives occur with other functions in the training set. The pattern of success indicates generalization in models of these types is highly sensitive to distributional properties of their training data.", }
Due to the finite nature of any evidence used in learning, systematic generalization is crucially reliant on the presence of inductive bias (Mitchell, 1980). We examine inductive biases in different types of sequence-to-sequence neural network models, including CNNs, LSTMs (with and without attention), and transformers, inspired by Kharitonov and Chaabouni (2021). Crucially, however, we consider a wider range of possible inductive biases than their study did. Investigating preferences for hierarchical generalization compared to other types of generalization, we find that, contrary to their results, transformers display no preference for hierarchical generalization, but instead prefer a counting strategy. We also investigate biases toward different types of compositionality. By controlling for a confound in Kharitonov and Chaabouni (2021)'s test set, we find much less consistent generalization overall, and find that a large number of responses were among types other than the two types of generalization they had considered. Nevertheless, we observe consistent compositional generalization to held out combinations of primitives and functions on a SCAN task (Lake and Baroni, 2017) by models of all types, but only when primitives occur with other functions in the training set. The pattern of success indicates generalization in models of these types is highly sensitive to distributional properties of their training data.
[ "Wilson, Michael", "Frank, Robert" ]
Inductive Bias Is in the Eye of the Beholder
genbench-1.12
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.13.bib
https://aclanthology.org/2023.genbench-1.13/
@inproceedings{merlo-etal-2023-blackbird, title = "Blackbird Language Matrices Tasks for Generalization", author = "Merlo, Paola and Jiang, Chunyang and Samo, Giuseppe and Nastase, Vivi", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.13", doi = "10.18653/v1/2023.genbench-1.13", pages = "163--172", abstract = "To develop a system with near-human language capabilities, we need to understand current systems{'} generalisation and compositional abilities. We approach this by generating compositional, structured data, inspired from visual intelligence tests, that depend on the problem-solvers being able to disentangle objects and their absolute and relative properties in a sequence of images. We design an analogous task and develop the corresponding datasets that capture specific linguistic phenomena and their properties. Solving each problem instance depends on detecting the relevant linguistic objects and generative rules of the problem. We propose two datasets modelling two linguistic phenomena {--} subject-verb agreement in French, and verb alternations in English. The datasets can be used to investigate how LLMs encode linguistic objects, such as phrases, their grammatical and semantic properties, such as number or semantic role, and how such information is combined to correctly solve each problem. Specifically generated error types help investigate the behaviour of the system, which important information it is able to detect, and which structures mislead it.", }
To develop a system with near-human language capabilities, we need to understand current systems' generalisation and compositional abilities. We approach this by generating compositional, structured data, inspired from visual intelligence tests, that depend on the problem-solvers being able to disentangle objects and their absolute and relative properties in a sequence of images. We design an analogous task and develop the corresponding datasets that capture specific linguistic phenomena and their properties. Solving each problem instance depends on detecting the relevant linguistic objects and generative rules of the problem. We propose two datasets modelling two linguistic phenomena – subject-verb agreement in French, and verb alternations in English. The datasets can be used to investigate how LLMs encode linguistic objects, such as phrases, their grammatical and semantic properties, such as number or semantic role, and how such information is combined to correctly solve each problem. Specifically generated error types help investigate the behaviour of the system, which important information it is able to detect, and which structures mislead it.
[ "Merlo, Paola", "Jiang, Chunyang", "Samo, Giuseppe", "Nastase, Vivi" ]
Blackbird Language Matrices Tasks for Generalization
genbench-1.13
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.14.bib
https://aclanthology.org/2023.genbench-1.14/
@inproceedings{milios-etal-2023-context, title = "In-Context Learning for Text Classification with Many Labels", author = "Milios, Aristides and Reddy, Siva and Bahdanau, Dzmitry", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.14", doi = "10.18653/v1/2023.genbench-1.14", pages = "173--184", abstract = "In-context learning (ICL) using large language models for tasks with many labels is challenging due to the limited context window, which makes it difficult to fit a sufficient number of examples in the prompt. In this paper, we use a pre-trained dense retrieval model to bypass this limitation, giving the model only a partial view of the full label space for each inference call. Testing with recent open-source LLMs (OPT, LLaMA), we set new state of the art performance in few-shot settings for three common intent classification datasets, with no fine-tuning. We also surpass fine-tuned performance on fine-grained sentiment classification in certain cases. We analyze the performance across number of in-context examples and different model scales, showing that larger models are necessary to effectively make use of larger context lengths for ICL. By running several ablations, we analyze the model{'}s use of: a) the similarity of the in-context examples to the current input, b) the semantic content of the class names, and c) the correct correspondence between examples and labels. We demonstrate that all three are needed to varying degrees depending on the domain, contrary to certain recent works.", }
In-context learning (ICL) using large language models for tasks with many labels is challenging due to the limited context window, which makes it difficult to fit a sufficient number of examples in the prompt. In this paper, we use a pre-trained dense retrieval model to bypass this limitation, giving the model only a partial view of the full label space for each inference call. Testing with recent open-source LLMs (OPT, LLaMA), we set new state of the art performance in few-shot settings for three common intent classification datasets, with no fine-tuning. We also surpass fine-tuned performance on fine-grained sentiment classification in certain cases. We analyze the performance across number of in-context examples and different model scales, showing that larger models are necessary to effectively make use of larger context lengths for ICL. By running several ablations, we analyze the model's use of: a) the similarity of the in-context examples to the current input, b) the semantic content of the class names, and c) the correct correspondence between examples and labels. We demonstrate that all three are needed to varying degrees depending on the domain, contrary to certain recent works.
[ "Milios, Aristides", "Reddy, Siva", "Bahdanau, Dzmitry" ]
In-Context Learning for Text Classification with Many Labels
genbench-1.14
2309.10954
[ "" ]
https://huggingface.co/papers/2309.10954
0
0
0
3
[]
[]
[]
1
Poster
https://aclanthology.org/2023.genbench-1.15.bib
https://aclanthology.org/2023.genbench-1.15/
@inproceedings{zhifei-wang-steinert-threlkeld-2023-gqg, title = "{GQG}: Generalized Quantifier Generalization - A Dataset for Evaluating Quantifier Semantics Understanding in Language Models", author = "Zhifei Wang, Leroy and Steinert-Threlkeld, Shane", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.15", doi = "10.18653/v1/2023.genbench-1.15", pages = "185--192", abstract = "We present a new dataset consisting of various quantifier expressions to evaluate the generalization abilities of language models. The dataset contains 18,360 prompts encompassing diverse quantifiers, forming the basis of a new framework for assessing semantic understanding in this domain. We test the effectiveness of our dataset using Pythia models, ranging from 410 million to 6.9 billion, showing that quantifier-based tasks can be challenging for current language models. We make our code and data publicly available, such that the dataset can be easily extended or updated based on different evaluation needs.", }
We present a new dataset consisting of various quantifier expressions to evaluate the generalization abilities of language models. The dataset contains 18,360 prompts encompassing diverse quantifiers, forming the basis of a new framework for assessing semantic understanding in this domain. We test the effectiveness of our dataset using Pythia models, ranging from 410 million to 6.9 billion parameters, showing that quantifier-based tasks can be challenging for current language models. We make our code and data publicly available, such that the dataset can be easily extended or updated based on different evaluation needs.
[ "Zhifei Wang, Leroy", "Steinert-Threlkeld, Shane" ]
GQG: Generalized Quantifier Generalization - A Dataset for Evaluating Quantifier Semantics Understanding in Language Models
genbench-1.15
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.16.bib
https://aclanthology.org/2023.genbench-1.16/
@inproceedings{pengpun-etal-2023-cross, title = "Cross-Lingual Data Augmentation For {T}hai Question-Answering", author = "Pengpun, Parinthapat and Udomcharoenchaikit, Can and Buaphet, Weerayut and Limkonchotiwat, Peerat", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.16", doi = "10.18653/v1/2023.genbench-1.16", pages = "193--203", abstract = "This paper presents an innovative data augmentation framework with data quality control designed to enhance the robustness of Question Answering (QA) models in low-resource languages, particularly Thai. Recognizing the challenges posed by the scarcity and quality of training data, we leverage data augmentation techniques in both monolingual and cross-lingual settings. Our approach augments and enriches the original dataset, thereby increasing its linguistic diversity and robustness. We evaluate the robustness of our framework on Machine Reading Comprehension, and the experimental results illustrate the potential of data augmentation to effectively increase training data and improve model generalization in low-resource language settings, offering a promising direction for the data augmentation manner.", }
This paper presents an innovative data augmentation framework with data quality control designed to enhance the robustness of Question Answering (QA) models in low-resource languages, particularly Thai. Recognizing the challenges posed by the scarcity and quality of training data, we leverage data augmentation techniques in both monolingual and cross-lingual settings. Our approach augments and enriches the original dataset, thereby increasing its linguistic diversity and robustness. We evaluate the robustness of our framework on Machine Reading Comprehension, and the experimental results illustrate the potential of data augmentation to effectively increase training data and improve model generalization in low-resource language settings, offering a promising direction for data augmentation.
[ "Pengpun, Parinthapat", "Udomcharoenchaikit, Can", "Buaphet, Weerayut", "Limkonchotiwat, Peerat" ]
Cross-Lingual Data Augmentation For Thai Question-Answering
genbench-1.16
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.17.bib
https://aclanthology.org/2023.genbench-1.17/
@inproceedings{moisio-etal-2023-using, title = "On using distribution-based compositionality assessment to evaluate compositional generalisation in machine translation", author = "Moisio, Anssi and Creutz, Mathias and Kurimo, Mikko", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.17", doi = "10.18653/v1/2023.genbench-1.17", pages = "204--213", abstract = "Compositional generalisation (CG), in NLP and in machine learning more generally, has been assessed mostly using artificial datasets. It is important to develop benchmarks to assess CG also in real-world natural language tasks in order to understand the abilities and limitations of systems deployed in the wild. To this end, our GenBench Collaborative Benchmarking Task submission utilises the distribution-based compositionality assessment (DBCA) framework to split the Europarl translation corpus into a training and a test set in such a way that the test set requires compositional generalisation capacity. Specifically, the training and test sets have divergent distributions of dependency relations, testing NMT systems{'} capability of translating dependencies that they have not been trained on. This is a fully-automated procedure to create natural language compositionality benchmarks, making it simple and inexpensive to apply it further to other datasets and languages. The code and data for the experiments is available at https://github.com/aalto-speech/dbca.", }
Compositional generalisation (CG), in NLP and in machine learning more generally, has been assessed mostly using artificial datasets. It is important to develop benchmarks to assess CG also in real-world natural language tasks in order to understand the abilities and limitations of systems deployed in the wild. To this end, our GenBench Collaborative Benchmarking Task submission utilises the distribution-based compositionality assessment (DBCA) framework to split the Europarl translation corpus into a training and a test set in such a way that the test set requires compositional generalisation capacity. Specifically, the training and test sets have divergent distributions of dependency relations, testing NMT systems' capability of translating dependencies that they have not been trained on. This is a fully-automated procedure to create natural language compositionality benchmarks, making it simple and inexpensive to apply it further to other datasets and languages. The code and data for the experiments is available at https://github.com/aalto-speech/dbca.
[ "Moisio, Anssi", "Creutz, Mathias", "Kurimo, Mikko" ]
On using distribution-based compositionality assessment to evaluate compositional generalisation in machine translation
genbench-1.17
2311.08249
[ "https://github.com/aalto-speech/dbca" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.genbench-1.18.bib
https://aclanthology.org/2023.genbench-1.18/
@inproceedings{somov-tutubalina-2023-shifted, title = "Shifted {PAUQ}: Distribution shift in text-to-{SQL}", author = "Somov, Oleg and Tutubalina, Elena", editor = "Hupkes, Dieuwke and Dankers, Verna and Batsuren, Khuyagbaatar and Sinha, Koustuv and Kazemnejad, Amirhossein and Christodoulopoulos, Christos and Cotterell, Ryan and Bruni, Elia", booktitle = "Proceedings of the 1st GenBench Workshop on (Benchmarking) Generalisation in NLP", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.genbench-1.18", doi = "10.18653/v1/2023.genbench-1.18", pages = "214--220", abstract = "Semantic parsing plays a pivotal role in advancing the accessibility of human-computer interaction on a large scale. Spider, a widely recognized dataset for text2SQL, contains a wide range of natural language (NL) questions in English and corresponding SQL queries. Original splits of Spider and its adapted to Russian language and improved version, PAUQ, assume independence and identical distribution of training and testing data (i.i.d split). In this work, we propose a target length split and multilingual i.i.d split to measure compositionality and cross-language generalization. We present experimental results of popular text2SQL models on original, multilingual, and target length splits. We also construct a context-free grammar for the evaluation of compositionality in text2SQL in an out-of-distribution setting. We make the splits publicly available on HuggingFace hub via https://huggingface.co/datasets/composite/pauq", }
Semantic parsing plays a pivotal role in advancing the accessibility of human-computer interaction on a large scale. Spider, a widely recognized dataset for text2SQL, contains a wide range of natural language (NL) questions in English and corresponding SQL queries. Original splits of Spider and of PAUQ, its Russian-language adaptation and improved version, assume independence and identical distribution of training and testing data (i.i.d. split). In this work, we propose a target length split and a multilingual i.i.d. split to measure compositionality and cross-language generalization. We present experimental results of popular text2SQL models on original, multilingual, and target length splits. We also construct a context-free grammar for the evaluation of compositionality in text2SQL in an out-of-distribution setting. We make the splits publicly available on HuggingFace hub via https://huggingface.co/datasets/composite/pauq
[ "Somov, Oleg", "Tutubalina, Elena" ]
Shifted PAUQ: Distribution shift in text-to-SQL
genbench-1.18
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.1.bib
https://aclanthology.org/2023.lchange-1.1/
@inproceedings{ehrenworth-keith-2023-literary-intertextual, title = "Literary Intertextual Semantic Change Detection: Application and Motivation for Evaluating Models on Small Corpora", author = "Ehrenworth, Jackson and Keith, Katherine", editor = "Tahmasebi, Nina and Montariol, Syrielle and Dubossarsky, Haim and Kutuzov, Andrey and Hengchen, Simon and Alfter, David and Periti, Francesco and Cassotti, Pierluigi", booktitle = "Proceedings of the 4th Workshop on Computational Approaches to Historical Language Change", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.lchange-1.1", doi = "10.18653/v1/2023.lchange-1.1", pages = "1--14", abstract = "Lexical semantic change detection is the study of how words change meaning between corpora. While Schlechtweg et al. (2020) standardized both datasets and evaluation metrics for this shared task, for those interested in applying semantic change detection models to small corpora{---}e.g., in the digital humanities{---}there is a need for evaluation involving much smaller datasets. We present a method and open-source code pipeline for downsampling the SemEval-2020 Task 1 corpora while preserving gold standard measures of semantic change. We then evaluate several state-of-the-art models trained on these downsampled corpora and find both dramatically decreased performance (average 67{\%} decrease) and high variance. We also propose a novel application to the digital humanities and provide a case study demonstrating that semantic change detection can be used in an exploratory manner to produce insightful avenues of investigation for literary scholars.", }
Lexical semantic change detection is the study of how words change meaning between corpora. While Schlechtweg et al. (2020) standardized both datasets and evaluation metrics for this shared task, for those interested in applying semantic change detection models to small corpora – e.g., in the digital humanities – there is a need for evaluation involving much smaller datasets. We present a method and open-source code pipeline for downsampling the SemEval-2020 Task 1 corpora while preserving gold standard measures of semantic change. We then evaluate several state-of-the-art models trained on these downsampled corpora and find both dramatically decreased performance (average 67% decrease) and high variance. We also propose a novel application to the digital humanities and provide a case study demonstrating that semantic change detection can be used in an exploratory manner to produce insightful avenues of investigation for literary scholars.
[ "Ehrenworth, Jackson", "Keith, Katherine" ]
Literary Intertextual Semantic Change Detection: Application and Motivation for Evaluating Models on Small Corpora
lchange-1.1
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.2.bib
https://aclanthology.org/2023.lchange-1.2/
@inproceedings{lendvai-etal-2023-domain-adapting, title = "Domain-Adapting {BERT} for Attributing Manuscript, Century and Region in Pre-{M}odern {S}lavic Texts", author = "Lendvai, Piroska and Reichel, Uwe and Jouravel, Anna and Rabus, Achim and Renje, Elena", editor = "Tahmasebi, Nina and Montariol, Syrielle and Dubossarsky, Haim and Kutuzov, Andrey and Hengchen, Simon and Alfter, David and Periti, Francesco and Cassotti, Pierluigi", booktitle = "Proceedings of the 4th Workshop on Computational Approaches to Historical Language Change", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.lchange-1.2", doi = "10.18653/v1/2023.lchange-1.2", pages = "15--21", abstract = "Our study presents a stratified dataset compiled from six different Slavic bodies of text, for cross-linguistic and diachronic analyses of Slavic Pre-Modern language variants. We demonstrate unsupervised domain adaptation and supervised finetuning of BERT on these low-resource, historical Slavic variants, for the purposes of provenance attribution in terms of three downstream tasks: manuscript, century and copying region classification.The data compilation aims to capture diachronic as well as regional language variation and change: the texts were written in the course of roughly a millennium, incorporating language variants from the High Middle Ages to the Early Modern Period, and originate from a variety of geographic regions. Mechanisms of language change in relatively small portions of such data have been inspected, analyzed and typologized by Slavists manually; our contribution aims to investigate the extent to which the BERT transformer architecture and pretrained models can benefit this process. Using these datasets for domain adaptation, we could attribute temporal, geographical and manuscript origin on the level of text snippets with high F-scores. We also conducted a qualitative analysis of the models{'} misclassifications.", }
Our study presents a stratified dataset compiled from six different Slavic bodies of text, for cross-linguistic and diachronic analyses of Slavic Pre-Modern language variants. We demonstrate unsupervised domain adaptation and supervised finetuning of BERT on these low-resource, historical Slavic variants, for the purposes of provenance attribution in terms of three downstream tasks: manuscript, century and copying region classification. The data compilation aims to capture diachronic as well as regional language variation and change: the texts were written in the course of roughly a millennium, incorporating language variants from the High Middle Ages to the Early Modern Period, and originate from a variety of geographic regions. Mechanisms of language change in relatively small portions of such data have been inspected, analyzed and typologized by Slavists manually; our contribution aims to investigate the extent to which the BERT transformer architecture and pretrained models can benefit this process. Using these datasets for domain adaptation, we could attribute temporal, geographical and manuscript origin on the level of text snippets with high F-scores. We also conducted a qualitative analysis of the models' misclassifications.
[ "Lendvai, Piroska", "Reichel, Uwe", "Jouravel, Anna", "Rabus, Achim", "Renje, Elena" ]
Domain-Adapting BERT for Attributing Manuscript, Century and Region in Pre-Modern Slavic Texts
lchange-1.2
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.3.bib
https://aclanthology.org/2023.lchange-1.3/
@inproceedings{list-etal-2023-representing-computing, title = "Representing and Computing Uncertainty in Phonological Reconstruction", author = "List, Johann-Mattis and Hill, Nathan and Forkel, Robert and Blum, Frederic", editor = "Tahmasebi, Nina and Montariol, Syrielle and Dubossarsky, Haim and Kutuzov, Andrey and Hengchen, Simon and Alfter, David and Periti, Francesco and Cassotti, Pierluigi", booktitle = "Proceedings of the 4th Workshop on Computational Approaches to Historical Language Change", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.lchange-1.3", doi = "10.18653/v1/2023.lchange-1.3", pages = "22--32", abstract = "Despite the inherently fuzzy nature of reconstructions in historical linguistics, most scholars do not represent their uncertainty when proposing proto-forms. With the increasing success of recently proposed approaches to automating certain aspects of the traditional comparative method, the formal representation of proto-forms has also improved. This formalization makes it possible to address both the representation and the computation of uncertainty. Building on recent advances in supervised phonological reconstruction, during which an algorithm learns how to reconstruct words in a given proto-language relying on previously annotated data, and inspired by improved methods for automated word prediction from cognate sets, we present a new framework that allows for the representation of uncertainty in linguistic reconstruction and also includes a workflow for the computation of fuzzy reconstructions from linguistic data.", }
Despite the inherently fuzzy nature of reconstructions in historical linguistics, most scholars do not represent their uncertainty when proposing proto-forms. With the increasing success of recently proposed approaches to automating certain aspects of the traditional comparative method, the formal representation of proto-forms has also improved. This formalization makes it possible to address both the representation and the computation of uncertainty. Building on recent advances in supervised phonological reconstruction, during which an algorithm learns how to reconstruct words in a given proto-language relying on previously annotated data, and inspired by improved methods for automated word prediction from cognate sets, we present a new framework that allows for the representation of uncertainty in linguistic reconstruction and also includes a workflow for the computation of fuzzy reconstructions from linguistic data.
[ "List, Johann-Mattis", "Hill, Nathan", "Forkel, Robert", "Blum, Frederic" ]
Representing and Computing Uncertainty in Phonological Reconstruction
lchange-1.3
2310.12727
[ "https://github.com/lingpy/fuzzy" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.4.bib
https://aclanthology.org/2023.lchange-1.4/
@inproceedings{beck-kollner-2023-ghisbert-training, title = "{GH}is{BERT} {--} Training {BERT} from scratch for lexical semantic investigations across historical {G}erman language stages", author = {Beck, Christin and K{\"o}llner, Marisa}, editor = "Tahmasebi, Nina and Montariol, Syrielle and Dubossarsky, Haim and Kutuzov, Andrey and Hengchen, Simon and Alfter, David and Periti, Francesco and Cassotti, Pierluigi", booktitle = "Proceedings of the 4th Workshop on Computational Approaches to Historical Language Change", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.lchange-1.4", doi = "10.18653/v1/2023.lchange-1.4", pages = "33--45", abstract = "While static embeddings have dominated computational approaches to lexical semantic change for quite some time, recent approaches try to leverage the contextualized embeddings generated by the language model BERT for identifying semantic shifts in historical texts. However, despite their usability for detecting changes in the more recent past, it remains unclear how well language models scale to investigations going back further in time, where the language differs substantially from the training data underlying the models. In this paper, we present GHisBERT, a BERT-based language model trained from scratch on historical data covering all attested stages of German (going back to Old High German, c. 750 CE). Given a lack of ground truth data for investigating lexical semantic change across historical German language stages, we evaluate our model via a lexical similarity analysis of ten stable concepts. We show that, in comparison with an unmodified and a fine-tuned German BERT-base model, our model performs best in terms of assessing inter-concept similarity as well as intra-concept similarity over time. This in turn argues for the necessity of pre-training historical language models from scratch when working with historical linguistic data.", }
While static embeddings have dominated computational approaches to lexical semantic change for quite some time, recent approaches try to leverage the contextualized embeddings generated by the language model BERT for identifying semantic shifts in historical texts. However, despite their usability for detecting changes in the more recent past, it remains unclear how well language models scale to investigations going back further in time, where the language differs substantially from the training data underlying the models. In this paper, we present GHisBERT, a BERT-based language model trained from scratch on historical data covering all attested stages of German (going back to Old High German, c. 750 CE). Given a lack of ground truth data for investigating lexical semantic change across historical German language stages, we evaluate our model via a lexical similarity analysis of ten stable concepts. We show that, in comparison with an unmodified and a fine-tuned German BERT-base model, our model performs best in terms of assessing inter-concept similarity as well as intra-concept similarity over time. This in turn argues for the necessity of pre-training historical language models from scratch when working with historical linguistic data.
[ "Beck, Christin", "K{\\\"o}llner, Marisa" ]
GHisBERT – Training BERT from scratch for lexical semantic investigations across historical German language stages
lchange-1.4
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.5.bib
https://aclanthology.org/2023.lchange-1.5/
@inproceedings{jafari-etal-2023-longitudinal-study, title = "A longitudinal study about gradual changes in the {I}ranian Online Public Sphere pre and post of {`}Mahsa Moment{'}: Focusing on {T}witter", author = "Jafari, Sadegh and Fathi, Amin and Hajizadegan, Abolfazl and Kazemeini, Amirmohammad and Eetemadi, Sauleh", editor = "Tahmasebi, Nina and Montariol, Syrielle and Dubossarsky, Haim and Kutuzov, Andrey and Hengchen, Simon and Alfter, David and Periti, Francesco and Cassotti, Pierluigi", booktitle = "Proceedings of the 4th Workshop on Computational Approaches to Historical Language Change", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.lchange-1.5", doi = "10.18653/v1/2023.lchange-1.5", pages = "46--52", abstract = "Mahsa Amini{'}s death shocked Iranian society. The effects of this event and the subsequent tragedies in Iran not only in realspace but also in cyberspace, including Twitter, were tremendous and unimaginable. We explore how Twitter has changed after Mahsa Amini{'}s death by analyzing the sentiments of Iranian users in the 90 days after this event. Additionally, we track the change in word meaning and each word{'}s neighboring words. Finally, we use word clustering methods for topic modeling.", }
Mahsa Amini's death shocked Iranian society. The effects of this event and the subsequent tragedies in Iran not only in realspace but also in cyberspace, including Twitter, were tremendous and unimaginable. We explore how Twitter has changed after Mahsa Amini's death by analyzing the sentiments of Iranian users in the 90 days after this event. Additionally, we track the change in word meaning and each word's neighboring words. Finally, we use word clustering methods for topic modeling.
[ "Jafari, Sadegh", "Fathi, Amin", "Hajizadegan, Abolfazl", "Kazemeini, Amirmohammad", "Eetemadi, Sauleh" ]
A longitudinal study about gradual changes in the Iranian Online Public Sphere pre and post of 'Mahsa Moment': Focusing on Twitter
lchange-1.5
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.6.bib
https://aclanthology.org/2023.lchange-1.6/
@inproceedings{boholm-sayeed-2023-political-dogwhistles, title = "Political dogwhistles and community divergence in semantic change", author = "Boholm, Max and Sayeed, Asad", editor = "Tahmasebi, Nina and Montariol, Syrielle and Dubossarsky, Haim and Kutuzov, Andrey and Hengchen, Simon and Alfter, David and Periti, Francesco and Cassotti, Pierluigi", booktitle = "Proceedings of the 4th Workshop on Computational Approaches to Historical Language Change", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.lchange-1.6", doi = "10.18653/v1/2023.lchange-1.6", pages = "53--65", abstract = "We test whether the development of political dogwhistles can be observed using language change measures; specifically, does the development of a {``}hidden{''} message in a dogwhistle show up as differences in semantic change between communities over time? We take Swedish-language dogwhistles related to the on-going immigration debate and measure differences over time in their rate of semantic change between two Swedish-language community forums, Flashback and Familjeliv, the former representing an in-group for understanding the {``}hidden{''} meaning of the dogwhistles. We find that multiple measures are sensitive enough to detect differences over time, in that the meaning changes in Flashback over the relevant time period but not in Familjeliv. We also examine the sensitivity of multiple modeling approaches to semantic change in the matter of community divergence.", }
We test whether the development of political dogwhistles can be observed using language change measures; specifically, does the development of a "hidden" message in a dogwhistle show up as differences in semantic change between communities over time? We take Swedish-language dogwhistles related to the on-going immigration debate and measure differences over time in their rate of semantic change between two Swedish-language community forums, Flashback and Familjeliv, the former representing an in-group for understanding the "hidden" meaning of the dogwhistles. We find that multiple measures are sensitive enough to detect differences over time, in that the meaning changes in Flashback over the relevant time period but not in Familjeliv. We also examine the sensitivity of multiple modeling approaches to semantic change in the matter of community divergence.
[ "Boholm, Max", "Sayeed, Asad" ]
Political dogwhistles and community divergence in semantic change
lchange-1.6
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.7.bib
https://aclanthology.org/2023.lchange-1.7/
@inproceedings{dehouck-etal-2023-evosem-database, title = "{E}vo{S}em: A database of polysemous cognate sets", author = "Dehouck, Mathieu and Fran{\c{c}}ois, Alex and Kalyan, Siva and Pastor, Martial and Kletz, David", editor = "Tahmasebi, Nina and Montariol, Syrielle and Dubossarsky, Haim and Kutuzov, Andrey and Hengchen, Simon and Alfter, David and Periti, Francesco and Cassotti, Pierluigi", booktitle = "Proceedings of the 4th Workshop on Computational Approaches to Historical Language Change", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.lchange-1.7", doi = "10.18653/v1/2023.lchange-1.7", pages = "66--75", abstract = "Polysemies, or {``}colexifications{''}, are of great interest in cognitive and historical linguistics, since meanings that are frequently expressed by the same lexeme are likely to be conceptually similar, and lie along a common pathway of semantic change. We argue that these types of inferences can be more reliably drawn from polysemies of cognate sets (which we call {``}dialexifications{''}) than from polysemies of lexemes. After giving a precise definition of dialexification, we introduce EvoSem, a cross-linguistic database of etymologies scraped from several online sources. Based on this database, we measure for each pair of senses how many cognate sets include them both {---} i.e. how often this pair of senses is {``}dialexified{''}. This allows us to construct a weighted dialexification graph for any set of senses, indicating the conceptual and historical closeness of each pair. We also present an online interface for browsing our database, including graphs and interactive tables. We then discuss potential applications to NLP tasks and to linguistic research.", }
Polysemies, or {``}colexifications{''}, are of great interest in cognitive and historical linguistics, since meanings that are frequently expressed by the same lexeme are likely to be conceptually similar, and lie along a common pathway of semantic change. We argue that these types of inferences can be more reliably drawn from polysemies of cognate sets (which we call {``}dialexifications{''}) than from polysemies of lexemes. After giving a precise definition of dialexification, we introduce EvoSem, a cross-linguistic database of etymologies scraped from several online sources. Based on this database, we measure for each pair of senses how many cognate sets include them both {---} i.e. how often this pair of senses is {``}dialexified{''}. This allows us to construct a weighted dialexification graph for any set of senses, indicating the conceptual and historical closeness of each pair. We also present an online interface for browsing our database, including graphs and interactive tables. We then discuss potential applications to NLP tasks and to linguistic research.
[ "Dehouck, Mathieu", "Fran{\\c{c}}ois, Alex", "Kalyan, Siva", "Pastor, Martial", "Kletz, David" ]
EvoSem: A database of polysemous cognate sets
lchange-1.7
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.8.bib
https://aclanthology.org/2023.lchange-1.8/
@inproceedings{afanasev-2023-multi-lect, title = "Multi-lect automatic detection of {S}wadesh list items from raw corpus data in {E}ast {S}lavic languages", author = "Afanasev, Ilia", editor = "Tahmasebi, Nina and Montariol, Syrielle and Dubossarsky, Haim and Kutuzov, Andrey and Hengchen, Simon and Alfter, David and Periti, Francesco and Cassotti, Pierluigi", booktitle = "Proceedings of the 4th Workshop on Computational Approaches to Historical Language Change", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.lchange-1.8", doi = "10.18653/v1/2023.lchange-1.8", pages = "76--86", abstract = "The article introduces a novel task of multi-lect automatic detection of Swadesh list items from raw corpora. The task aids the early stage of historical linguistics study by helping the researcher compile word lists for further analysis. In this paper, I test multi-lect automatic detection on the East Slavic lects{'} data. The training data consists of Ukrainian, Belarusian, and Russian material. I introduce a new dataset for the Ukrainian language. I implement data augmentation techniques to give automatic tools a better understanding of the searched value. The test data consists of the Old East Slavic texts. I train HMM, CRF, and mBERT models, then test and evaluate them by harmonic F1 score. The baseline is a Random Forest classifier. I introduce two different subtasks: the search for new Swadesh list items, and the search for the known Swadesh list items in new lects of the well-established group. The first subtask, given the simultaneously diverse and vague nature of the Swadesh list, currently presents an almost unbeatable challenge for machine learning methods. The second subtask, on the other hand, is easier, and the mBERT model achieves a 0.57 F1 score. This is an impressive result, given how hard it is to formalise the token belonging to a very specific and thematically diverse set of concepts.", }
The article introduces a novel task of multi-lect automatic detection of Swadesh list items from raw corpora. The task aids the early stage of historical linguistics study by helping the researcher compile word lists for further analysis. In this paper, I test multi-lect automatic detection on the East Slavic lects{'} data. The training data consists of Ukrainian, Belarusian, and Russian material. I introduce a new dataset for the Ukrainian language. I implement data augmentation techniques to give automatic tools a better understanding of the searched value. The test data consists of the Old East Slavic texts. I train HMM, CRF, and mBERT models, then test and evaluate them by harmonic F1 score. The baseline is a Random Forest classifier. I introduce two different subtasks: the search for new Swadesh list items, and the search for the known Swadesh list items in new lects of the well-established group. The first subtask, given the simultaneously diverse and vague nature of the Swadesh list, currently presents an almost unbeatable challenge for machine learning methods. The second subtask, on the other hand, is easier, and the mBERT model achieves a 0.57 F1 score. This is an impressive result, given how hard it is to formalise the token belonging to a very specific and thematically diverse set of concepts.
[ "Afanasev, Ilia" ]
Multi-lect automatic detection of Swadesh list items from raw corpus data in East Slavic languages
lchange-1.8
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.9.bib
https://aclanthology.org/2023.lchange-1.9/
@inproceedings{adams-etal-2023-anchors-embedding, title = "Anchors in Embedding Space: A Simple Concept Tracking Approach to Support Conceptual History Research", author = "Adams, Jetske and Larson, Martha and Verheul, Jaap and Boyden, Michael", editor = "Tahmasebi, Nina and Montariol, Syrielle and Dubossarsky, Haim and Kutuzov, Andrey and Hengchen, Simon and Alfter, David and Periti, Francesco and Cassotti, Pierluigi", booktitle = "Proceedings of the 4th Workshop on Computational Approaches to Historical Language Change", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.lchange-1.9", doi = "10.18653/v1/2023.lchange-1.9", pages = "87--92", abstract = "We introduce a simple concept tracking approach to support conceptual history research. Building on the existing practices of conceptual historians, we use dictionaries to identify {``}anchors{''}, which represent primary dimensions of meaning of a concept. Then, we create a plot showing how a key concept has evolved over time in a historical corpus in relation to these dimensions. We demonstrate the approach by plotting the change of several key concepts in the COHA corpus.", }
We introduce a simple concept tracking approach to support conceptual history research. Building on the existing practices of conceptual historians, we use dictionaries to identify {``}anchors{''}, which represent primary dimensions of meaning of a concept. Then, we create a plot showing how a key concept has evolved over time in a historical corpus in relation to these dimensions. We demonstrate the approach by plotting the change of several key concepts in the COHA corpus.
[ "Adams, Jetske", "Larson, Martha", "Verheul, Jaap", "Boyden, Michael" ]
Anchors in Embedding Space: A Simple Concept Tracking Approach to Support Conceptual History Research
lchange-1.9
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.10.bib
https://aclanthology.org/2023.lchange-1.10/
@inproceedings{chen-etal-2023-chiwug-graph, title = "{C}hi{WUG}: A Graph-based Evaluation Dataset for {C}hinese Lexical Semantic Change Detection", author = "Chen, Jing and Chersoni, Emmanuele and Schlechtweg, Dominik and Prokic, Jelena and Huang, Chu-Ren", editor = "Tahmasebi, Nina and Montariol, Syrielle and Dubossarsky, Haim and Kutuzov, Andrey and Hengchen, Simon and Alfter, David and Periti, Francesco and Cassotti, Pierluigi", booktitle = "Proceedings of the 4th Workshop on Computational Approaches to Historical Language Change", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.lchange-1.10", doi = "10.18653/v1/2023.lchange-1.10", pages = "93--99", abstract = "Recent studies suggested that language models are efficient tools for measuring lexical semantic change. In our paper, we present the compilation of the first graph-based evaluation dataset for lexical semantic change in the context of the Chinese language, specifically covering the periods of pre- and post- Reform and Opening Up. Exploiting the existing framework DURel, we collect over 61,000 human semantic relatedness judgments for 40 targets. The inferred word usage graphs and semantic change scores provide a basis for visualization and evaluation of semantic change.", }
Recent studies suggested that language models are efficient tools for measuring lexical semantic change. In our paper, we present the compilation of the first graph-based evaluation dataset for lexical semantic change in the context of the Chinese language, specifically covering the periods of pre- and post- Reform and Opening Up. Exploiting the existing framework DURel, we collect over 61,000 human semantic relatedness judgments for 40 targets. The inferred word usage graphs and semantic change scores provide a basis for visualization and evaluation of semantic change.
[ "Chen, Jing", "Chersoni, Emmanuele", "Schlechtweg, Dominik", "Prokic, Jelena", "Huang, Chu-Ren" ]
ChiWUG: A Graph-based Evaluation Dataset for Chinese Lexical Semantic Change Detection
lchange-1.10
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster
https://aclanthology.org/2023.lchange-1.11.bib
https://aclanthology.org/2023.lchange-1.11/
@inproceedings{hoeken-etal-2023-towards-detecting, title = "Towards Detecting Lexical Change of Hate Speech in Historical Data", author = {Hoeken, Sanne and Spliethoff, Sophie and Schwandt, Silke and Zarrie{\ss}, Sina and Alacam, {\"O}zge}, editor = "Tahmasebi, Nina and Montariol, Syrielle and Dubossarsky, Haim and Kutuzov, Andrey and Hengchen, Simon and Alfter, David and Periti, Francesco and Cassotti, Pierluigi", booktitle = "Proceedings of the 4th Workshop on Computational Approaches to Historical Language Change", month = dec, year = "2023", address = "Singapore", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2023.lchange-1.11", doi = "10.18653/v1/2023.lchange-1.11", pages = "100--111", abstract = "The investigation of lexical change has predominantly focused on generic language evolution, not suited for detecting shifts in a particular domain, such as hate speech. Our study introduces the task of identifying changes in lexical semantics related to hate speech within historical texts. We present an interdisciplinary approach that brings together NLP and History, yielding a pilot dataset comprising 16th-century Early Modern English religious writings during the Protestant Reformation. We provide annotations for both semantic shifts and hatefulness on this data and, thereby, combine the tasks of Lexical Semantic Change Detection and Hate Speech Detection. Our framework and resulting dataset facilitate the evaluation of our applied methods, advancing the analysis of hate speech evolution.", }
The investigation of lexical change has predominantly focused on generic language evolution, not suited for detecting shifts in a particular domain, such as hate speech. Our study introduces the task of identifying changes in lexical semantics related to hate speech within historical texts. We present an interdisciplinary approach that brings together NLP and History, yielding a pilot dataset comprising 16th-century Early Modern English religious writings during the Protestant Reformation. We provide annotations for both semantic shifts and hatefulness on this data and, thereby, combine the tasks of Lexical Semantic Change Detection and Hate Speech Detection. Our framework and resulting dataset facilitate the evaluation of our applied methods, advancing the analysis of hate speech evolution.
[ "Hoeken, Sanne", "Spliethoff, Sophie", "Schw", "t, Silke", "Zarrie{\\ss}, Sina", "Alacam, {\\\"O}zge" ]
Towards Detecting Lexical Change of Hate Speech in Historical Data
lchange-1.11
[ "" ]
-1
-1
-1
-1
[]
[]
[]
0
Poster