"""RO-STS: The Romanian Semantic Textual Similarity Dataset""" |
|
import datasets


_CITATION = """\
Article under review
"""

_DESCRIPTION = """\
The RO-STS (Romanian Semantic Textual Similarity) dataset contains 8,628 sentence pairs annotated with a similarity score. It is a high-quality Romanian translation of the STS benchmark dataset.
"""

_HOMEPAGE = "https://github.com/dumitrescustefan/RO-STS/"

_LICENSE = "CC BY-SA 4.0 License"

_URL = "https://raw.githubusercontent.com/dumitrescustefan/RO-STS/master/dataset/text-similarity/"
_TRAINING_FILE = "RO-STS.train.tsv"
_TEST_FILE = "RO-STS.test.tsv"
_DEV_FILE = "RO-STS.dev.tsv"
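# Note (assumption, inferred from the parsing in _generate_examples below): each
# line of these TSV files is expected to hold one example in the form
# "<score>\t<sentence1>\t<sentence2>", where the score is the sentence-pair
# similarity (0 to 5 on the original STS benchmark scale).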
|
|
class ROSTSConfig(datasets.BuilderConfig):
    """BuilderConfig for RO-STS dataset"""

    def __init__(self, **kwargs):
        super(ROSTSConfig, self).__init__(**kwargs)
|
|
class RoSts(datasets.GeneratorBasedBuilder):
    """RO-STS dataset"""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        ROSTSConfig(name="ro_sts", version=VERSION, description="RO-STS dataset"),
    ]
|
    def _info(self):
        features = datasets.Features(
            {
                "score": datasets.Value("float"),
                "sentence1": datasets.Value("string"),
                "sentence2": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": _URL + _TRAINING_FILE,
            "dev": _URL + _DEV_FILE,
            "test": _URL + _TEST_FILE,
        }
        downloaded_files = dl_manager.download(urls_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"]},
            ),
        ]
|
    def _generate_examples(self, filepath):
        """Yields the examples in the raw (text) form, one per TSV line."""
        with open(filepath, encoding="utf-8") as f:
            for idx, row in enumerate(f):
                # Each line holds one example: score <tab> sentence1 <tab> sentence2.
                splits = row.strip().split("\t")
                if len(splits) != 3:
                    # Skip empty or malformed lines (e.g. a trailing newline).
                    continue
                yield idx, {
                    # Cast explicitly so the value matches the declared "score" feature.
                    "score": float(splits[0]),
                    "sentence1": splits[1],
                    "sentence2": splits[2],
                }
|
|
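# Minimal usage sketch (assumptions: the `datasets` library is installed, your
# version still supports loading local dataset scripts, and this file is saved
# as "ro_sts.py"; adjust the path or Hub name to your setup):
#
#   from datasets import load_dataset
#
#   dataset = load_dataset("path/to/ro_sts.py")
#   print(dataset["train"][0])
#   # -> {"score": ..., "sentence1": "...", "sentence2": "..."}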