|
import os |
|
import datasets |
|
import pandas as pd |
|
|
|
# Citation for this loader; intentionally left blank for now.
_CITATION = """\
"""


# Top-level description used in DatasetInfo; intentionally left blank for now.
_DESCRIPTION = """\
"""

# Homepage URL for the dataset card; not yet filled in.
_HOMEPAGE = ""

# License identifier for the dataset card; not yet filled in.
_LICENSE = ""

# Citation covering the whole SuperLim suite and the original datasets it bundles.
_SUPERLIM_CITATION = """\
Yvonne Adesam, Aleksandrs Berdicevskis, Felix Morger (2020): SwedishGLUE – Towards a Swedish Test Set for Evaluating Natural Language Understanding Models BibTeX
[1] Original Absabank:
Jacobo Rouces, Lars Borin, Nina Tahmasebi (2020): Creating an Annotated Corpus for Aspect-Based Sentiment Analysis in Swedish, in Proceedings of the 5th conference in Digital Humanities in the Nordic Countries, Riga, Latvia, October 21-23, 2020. BibTeX
[2] DaLAJ:
Volodina, Elena, Yousuf Ali Mohammed, and Julia Klezl (2021). DaLAJ - a dataset for linguistic acceptability judgments for Swedish. In Proceedings of the 10th Workshop on Natural Language Processing for Computer Assisted Language Learning (NLP4CALL 2021). Linköping Electronic Conference Proceedings 177:3, s. 28-37. https://ep.liu.se/ecp/177/003/ecp2021177003.pdf
[3] Analogy:
Tosin Adewumi, Foteini Liwicki, Markus Liwicki. (2020). Corpora compared: The case of the Swedish Gigaword & Wikipedia corpora. In: Proceedings of the 8th SLTC, Gothenburg. arXiv preprint arXiv:2011.03281
[4] Swedish Test Set for SemEval 2020 Task 1:
Unsupervised Lexical Semantic Change Detection: Dominik Schlechtweg, Barbara McGillivray, Simon Hengchen, Haim Dubossarsky, Nina Tahmasebi (2020): SemEval-2020 Task 1: Unsupervised Lexical Semantic Change Detection, in Proceedings of the Fourteenth Workshop on Semantic Evaluation (SemEval2020), Barcelona, Spain (Online), December 12, 2020. BibTeX
[5] Winogender:
Saga Hansson, Konstantinos Mavromatakis, Yvonne Adesam, Gerlof Bouma and Dana Dannélls (2021). The Swedish Winogender Dataset. In The 23rd Nordic Conference on Computational Linguistics (NoDaLiDa 2021), Reykjavik.
[6] SuperSim:
Hengchen, Simon and Tahmasebi, Nina (2021). SuperSim: a test set for word similarity and relatedness in Swedish. In The 23rd Nordic Conference on Computational Linguistics (NoDaLiDa 2021), Reykjavik. arXiv preprint arXiv:2014.05228
"""

# --- Per-task descriptions and citations (one pair per SuperLim subset) ---

_SUPERLIM_DESCRIPTION = """\
SuperLim, A standardized suite for evaluation and analysis of Swedish natural language understanding systems.
"""

_DaLAJ_DESCRIPTION = """\
Determine whether a sentence is correct Swedish or not.
"""

_DaLAJ_CITATION = """\
[1] Original Absabank:
Jacobo Rouces, Lars Borin, Nina Tahmasebi (2020): Creating an Annotated Corpus for Aspect-Based Sentiment Analysis in Swedish, in Proceedings of the 5th conference in Digital Humanities in the Nordic Countries, Riga, Latvia, October 21-23, 2020. BibTeX
[2] DaLAJ:
Volodina, Elena, Yousuf Ali Mohammed, and Julia Klezl (2021). DaLAJ - a dataset for linguistic acceptability judgments for Swedish. In Proceedings of the 10th Workshop on Natural Language Processing for Computer Assisted Language Learning (NLP4CALL 2021). Linköping Electronic Conference Proceedings 177:3, s. 28-37. https://ep.liu.se/ecp/177/003/ecp2021177003.pdf
"""

_SweAna_DESCRIPTION = """\
The Swedish analogy test set follows the format of the original Google version. However, it is bigger and balanced across the 2 major categories,
having a total of 20,638 samples, made up of 10,381 semantic and 10,257 syntactic samples. It is also roughly balanced across the syntactic subsections.
There are 5 semantic subsections and 6 syntactic subsections. The dataset was constructed, partly using the samples in the English version,
with the help of tools dedicated to Swedish translation and it was proof-read for corrections by two native speakers (with a percentage agreement of 98.93\%)."""

_SweAna_CITATION = """\
[1] Original Absabank:
Jacobo Rouces, Lars Borin, Nina Tahmasebi (2020): Creating an Annotated Corpus for Aspect-Based Sentiment Analysis in Swedish, in Proceedings of the 5th conference in Digital Humanities in the Nordic Countries, Riga, Latvia, October 21-23, 2020. BibTeX
"""

# Description text is in Swedish by design (user-facing dataset card content).
_SweDiag_DESCRIPTION = """\
Färdig preliminär översättning av SuperGLUE diagnostik. Datan innehåller alla ursprungliga annoterade satspar från SuperGLUE tillsammans
med deras svenska översättningar."""

_SweDiag_CITATION = """\
"""

_SweFaq_DESCRIPTION = """\
Vanliga frågor från svenska myndigheters webbsidor med svar i randomiserad ordning"""

_SweFaq_CITATION = """\
"""

_SweFracas_DESCRIPTION = """\
A textual inference/entailment problem set, derived from FraCas. The original English Fracas [1] was converted to html and edited by Bill MacCartney [2],
and then automatically translated to Swedish by Peter Ljunglöf and Magdalena Siverbo [3]. The current tabular form of the set was created by Aleksandrs Berdicevskis
by merging the Swedish and English versions and removing some of the problems. Finally, Lars Borin went through all the translations, correcting and Swedifying them manually.
As a result, many translations are rather liberal and diverge noticeably from the English original."""

_SweFracas_CITATION = """\
"""

_SwePar_DESCRIPTION = """\
SweParaphrase is a subset of the automatically translated Swedish Semantic Textual Similarity dataset (Isbister and Sahlgren, 2020).
It consists of 165 manually corrected Swedish sentence pairs paired with the original English sentences and their similarity scores
ranging between 0 (no meaning overlap) and 5 (meaning equivalence). These scores were taken from the English data, they were assigned
by Crowdsourcing through Mechanical Turk. Each sentence pair belongs to one genre (e.g. news, forums or captions).
The task is to determine how similar two sentences are."""

_SwePar_CITATION = """\
"""

_SweSat_DESCRIPTION = """\
The dataset provides a gold standard for Swedish word synonymy/definition. The test items are collected from the Swedish Scholastic
Aptitude Test (högskoleprovet), currently spanning the years 2006--2021 and 822 vocabulary test items. The task for the tested system
is to determine which synonym or definition of five alternatives is correct for each test item.
"""

_SweSat_CITATION = """\
"""

_SweSim_DESCRIPTION = """\
SuperSim is a large-scale similarity and relatedness test set for Swedish built with expert human judgments. The test set is composed of 1360 word-pairs independently judged for both relatedness and similarity by five annotators."""

_SweWgr_DESCRIPTION = """\
The SweWinogender test set is diagnostic dataset to measure gender bias in coreference resolution. It is modelled after the English Winogender benchmark,
and is released with reference statistics on the distribution of men and women between occupations and the association between gender and occupation in modern corpus material."""

_SweWsc_DESCRIPTION = """\
SweWinograd is a pronoun resolution test set, containing constructed items in the style of Winograd schema’s. The interpretation of the target pronouns is determined by (common sense)
reasoning and knowledge, and not by syntactic constraints, lexical distributional information or discourse structuring patterns.
The dataset contains 90 multiple choice with multiple correct answers test items."""

_SweWic_DESCRIPTION = """\
The Swedish Word-in-Context dataset provides a benchmark for evaluating distributional models of word meaning, in particular context-sensitive/dynamic models. Constructed following the principles of the (English)
Word-in-Context dataset, SweWiC consists of 1000 sentence pairs, where each sentence in a pair contains an occurence of a potentially ambiguous focus word specific to that pair. The question posed to the tested
system is whether these two occurrences represent instances of the same word sense. There are 500 same-sense pairs and 500 different-sense pairs."""

_argumentation_sentences_DESCRIPTION = """\
Argumentation sentences is a translated corpus for the task of identifying stance in relation to a topic. It consists of sentences labeled with pro, con or non in relation to one of six topics.
The original dataset can be found here https://github.com/trtm/AURC. The test set is manually corrected translations, the training set is machine translated. """

# NOTE(review): the name looks like a typo for _argumentation_sentences_CITATION;
# kept as-is because renaming would change the module's public surface.
_argumentation_sentences_DESCRIPTION_CITATION = """\
"""

# Base URL of the per-task data directories on the Hugging Face hub.
_URL = "https://huggingface.co/datasets/sbx/superlim-2/resolve/main/data/"

# Maps builder-config name -> directory/file stem of the task on the hub.
# Files are expected at "<_URL><value>/<value>_<split>.jsonl".
_TASKS = {
    "absabank-imm": "absabank-imm",
    "argumentation_sent":"argumentation-sentences",
    "dalaj-ged": "dalaj-ged-superlim",
    "swesim_relatedness": "supersim-superlim/supersim-superlim-relatedness",
    "swesim_similarity": "supersim-superlim/supersim-superlim-similarity",
    "sweana": "sweanalogy",
    "swefaq": "swefaq",
    "swepar": "sweparaphrase",
    "swesat": "swesat-synonyms",
    "swewic": "swewic",
    "swefracas": "swefracas",
    "swediagnostics": "swediagnostics"
}
|
|
|
class SuperLimConfig(datasets.BuilderConfig):
    """BuilderConfig describing a single SuperLim task.

    Pins the config version to 2.0.0 and records per-task metadata
    (feature names, download URL, citation, homepage).
    """

    def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
        """Store the task metadata and forward the rest to BuilderConfig.

        Args:
            features: `list[string]`, feature names that will appear in the
                feature dict. Should not include "label".
            data_url: `string`, url to download the zip file from.
            citation: `string`, citation for the data set.
            url: `string`, url for information about the data set.
            label_classes: `list[string]`, classes for the label when it is
                present as a string. Non-string labels will be cast to either
                'False' or 'True'.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("2.0.0"), **kwargs)
        self.features = features
        self.data_url = data_url
        self.citation = citation
        self.url = url
        self.label_classes = label_classes
|
|
|
class SuperLim(datasets.GeneratorBasedBuilder):
    """The SuperLim benchmark: a suite of Swedish NLU evaluation tasks.

    Each builder config selects one task; its data is read from per-split
    jsonl files hosted under `_URL` (see `_TASKS` for the path stems).
    """

    VERSION = datasets.Version("2.0.0")

    BUILDER_CONFIGS = [
        # NOTE(review): no dedicated description constant exists for
        # absabank-imm in this file, so it reuses _DaLAJ_DESCRIPTION for now.
        datasets.BuilderConfig(name="absabank-imm", version=VERSION, description=_DaLAJ_DESCRIPTION),
        datasets.BuilderConfig(name="dalaj-ged", version=VERSION, description=_DaLAJ_DESCRIPTION),
        datasets.BuilderConfig(name="swesim_relatedness", version=VERSION, description=_SweSim_DESCRIPTION),
        datasets.BuilderConfig(name="swesim_similarity", version=VERSION, description=_SweSim_DESCRIPTION),
        datasets.BuilderConfig(name="sweana", version=VERSION, description=_SweAna_DESCRIPTION),
        datasets.BuilderConfig(name="swefaq", version=VERSION, description=_SweFaq_DESCRIPTION),
        datasets.BuilderConfig(name="swepar", version=VERSION, description=_SwePar_DESCRIPTION),
        datasets.BuilderConfig(name="swesat", version=VERSION, description=_SweSat_DESCRIPTION),
        datasets.BuilderConfig(name="swewic", version=VERSION, description=_SweWic_DESCRIPTION),
        datasets.BuilderConfig(name="argumentation_sent", version=VERSION, description=_argumentation_sentences_DESCRIPTION),
        # Bug fix: previously reused _argumentation_sentences_DESCRIPTION.
        datasets.BuilderConfig(name="swefracas", version=VERSION, description=_SweFracas_DESCRIPTION),
        # Added: swediagnostics is listed in _TASKS and fully handled by
        # _info/_generate_examples below, but previously had no config and
        # was therefore impossible to load.
        datasets.BuilderConfig(name="swediagnostics", version=VERSION, description=_SweDiag_DESCRIPTION),
    ]

    # Feature columns per task. All columns are plain strings except the
    # float-valued label of absabank-imm, which is special-cased in _info.
    _STRING_COLUMNS = {
        "dalaj-ged": ("sentence", "label", "meta"),
        "sweana": ("a", "b", "c", "d", "relation"),
        "swefaq": ("category_id", "candidate_answers", "question", "label", "meta"),
        "swepar": ("sentence_1", "sentence_2", "similarity_score"),
        "swesat": ("target_item", "answer_1", "answer_2", "answer_3", "answer_4", "answer_5"),
        "swesim_relatedness": ("word_1", "word_2", "relatedness"),
        "swesim_similarity": ("word_1", "word_2", "similarity"),
        "swewic": ("sentence_1", "word_1", "sentence_2", "word_2", "same_sense",
                   "start_1", "start_2", "end_1", "end_2"),
        "argumentation_sent": ("topic", "label", "sentence"),
        "swefracas": ("id", "original_id", "attribute", "value"),
        "swediagnostics": ("lexical_semantics", "predicate_argument_structure", "logic",
                           "knowledge", "domain", "premise", "hypothesis", "label"),
        "swedn": ("id", "headline", "summary", "article", "article_category"),
    }

    # Tasks that additionally provide a dev (validation) or train split;
    # every task provides a test split.
    _HAS_DEV = ("absabank-imm", "dalaj-ged", "swefaq", "swewic", "swedn")
    _HAS_TRAIN = ("absabank-imm", "dalaj-ged", "swefaq", "swewic", "argumentation_sent", "swedn")

    # Per-task mapping {output feature name: column name in the jsonl file}.
    # swewgr and swewsc are kept for completeness but are currently
    # unreachable: _info defines no features for them, so loading those
    # names raises there first.
    _COLUMN_MAPS = {
        "dalaj-ged": {"sentence": "sentence", "label": "label", "meta": "meta"},
        "absabank-imm": {"id": "id", "text": "text", "label": "label"},
        "sweana": {"a": "A", "b": "B", "c": "C", "d": "D", "relation": "relation"},
        "swefaq": {"category_id": "category_id", "question": "question",
                   "candidate_answers": "candidate_answers", "label": "label", "meta": "meta"},
        "swepar": {"sentence_1": "sentence_1", "sentence_2": "sentence_2",
                   "similarity_score": "similarity_score"},
        "swesat": {"target_item": "target_item", "answer_1": "answer_1", "answer_2": "answer_2",
                   "answer_3": "answer_3", "answer_4": "answer_4", "answer_5": "answer_5"},
        # The source columns genuinely contain padding spaces — do not strip.
        "swesim_relatedness": {"word_1": "Word 1 ", "word_2": " Word 2 ", "relatedness": " Average "},
        "swesim_similarity": {"word_1": "Word 1 ", "word_2": " Word 2 ", "similarity": " Average "},
        "swewgr": {"text": "text", "challenge": "challenge", "responses": "responses"},
        "swewic": {"sentence_1": "sentence1", "word_1": "word1", "sentence_2": "sentence2",
                   "word_2": "word2", "same_sense": "same_sense", "start_1": "start1",
                   "end_1": "end1", "start_2": "start2", "end_2": "end2"},
        "swewsc": {"passage": "passage", "challenge_text": "challenge_text",
                   "response_text": "response_text", "challenge_begin": "challenge_begin",
                   "challenge_end": "challenge_end", "response_begin": "response_begin",
                   "response_end": "response_end", "label": "label"},
        "argumentation_sent": {"topic": "topic", "label": "label", "sentence": "sentence"},
        "swediagnostics": {"lexical_semantics": "lexical_semantics",
                           "predicate_argument_structure": "predicate_argument_structure",
                           "logic": "logic", "knowledge": "knowledge", "domain": "domain",
                           "premise": "premise", "hypothesis": "hypothesis", "label": "label"},
        "swefracas": {"id": "id", "original_id": "original_id",
                      "attribute": "attribute", "value": "value"},
        "swedn": {"id": "id", "headline": "headline", "summary": "summary",
                  "article": "article", "article_category": "article_category"},
    }

    def _info(self):
        """Return the DatasetInfo (feature schema) for the selected config.

        Raises:
            ValueError: if the config name is not a known subset.
        """
        if self.config.name == "absabank-imm":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    # Sentiment strength is a numeric score, not a class label.
                    "label": datasets.Value("float32"),
                }
            )
        elif self.config.name in self._STRING_COLUMNS:
            features = datasets.Features(
                {column: datasets.Value("string")
                 for column in self._STRING_COLUMNS[self.config.name]}
            )
        else:
            raise ValueError(f"Subset {self.config.name} does not exist.")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the per-split jsonl files and declare the splits.

        Every task has a test split; dev/train are added for the tasks
        listed in _HAS_DEV / _HAS_TRAIN.
        """
        task = _TASKS[self.config.name]

        def download(split):
            # Bug fix: build the URL with explicit "/" separators. The old
            # os.path.join would insert "\\" on Windows and corrupt the URL.
            # _URL already ends with "/".
            return dl_manager.download_and_extract(f"{_URL}{task}/{task}_{split}.jsonl")

        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": download("test"), "split": "test"},
            )
        ]
        if self.config.name in self._HAS_DEV:
            splits.append(
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={"filepath": download("dev"), "split": "dev"},
                )
            )
        if self.config.name in self._HAS_TRAIN:
            splits.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": download("train"), "split": "train"},
                )
            )
        return splits

    def _generate_examples(self, filepath, split):
        """Yield (key, example) pairs from one jsonl file of the active task.

        Args:
            filepath: local path of the downloaded jsonl file.
            split: split name ("train"/"dev"/"test"); unused here, kept for
                the datasets API.

        Raises:
            ValueError: if the config name is not a known subset.
        """
        try:
            mapping = self._COLUMN_MAPS[self.config.name]
        except KeyError:
            raise ValueError(f"Subset {self.config.name} does not exist") from None
        df = pd.read_json(filepath, lines=True)
        for key, row in df.iterrows():
            example = {}
            for feature, column in mapping.items():
                if self.config.name == "swefracas" and feature == "id" and column not in row:
                    # Bug fix: _info declares an "id" feature for swefracas
                    # but the generator previously never yielded it, which
                    # fails at encoding time. Fall back to the row index if
                    # the file carries no "id" column.
                    # TODO(review): confirm whether the jsonl has "id".
                    example["id"] = str(key)
                else:
                    example[feature] = row[column]
            yield key, example