import datasets

_DESCRIPTION = """\
The FiQA dataset translated to Portuguese.
"""
# NOTE: "resolve" URLs (rather than "blob" URLs, which return HTML pages) are
# needed to download the raw files. The qrel URL is reconstructed on the
# assumption that the file lives in the same repo as the other two.
_URLS = {
    "corpus": "https://huggingface.co/datasets/leonardo-avila/fiqa_pt/resolve/main/corpus_pt.tsv",
    "topics": "https://huggingface.co/datasets/leonardo-avila/fiqa_pt/resolve/main/topics_pt.tsv",
    "qrel": "https://huggingface.co/datasets/leonardo-avila/fiqa_pt/resolve/main/qrel.tsv",
}
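
# Expected TSV layouts, inferred from the parsing in _generate_examples below
# (not from an official spec):
#   corpus_pt.tsv / topics_pt.tsv: <id> \t <text> per line, no header row skipped
#   qrel.tsv: a header line containing "query-id", then <query-id> \t <doc-id> \t <relevance>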


class FiqaPT(datasets.GeneratorBasedBuilder):
    """FiQA benchmark dataset translated to Portuguese."""

    VERSION = datasets.Version("1.1.0")

    # One configuration from the list below can be loaded with, e.g.:
    # data = datasets.load_dataset("leonardo-avila/fiqa_pt", "corpus")
    # data = datasets.load_dataset("leonardo-avila/fiqa_pt", "qrel")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="corpus", version=VERSION, description="Load corpus"),
        datasets.BuilderConfig(name="topics", version=VERSION, description="Load topics"),
        datasets.BuilderConfig(name="qrel", version=VERSION, description="Load qrel"),
    ]
    DEFAULT_CONFIG_NAME = "corpus"  # A default configuration is not mandatory; use one only if it makes sense.

    def _info(self):
        if self.config.name in ["corpus", "topics"]:
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            )
        else:  # "qrel" config: relevance judgments linking topics to corpus documents
            features = datasets.Features(
                {
                    "query_id": datasets.Value("string"),
                    "doc_id": datasets.Value("string"),
                    # int32 matches the int() cast in _generate_examples.
                    "rel": datasets.Value("int32"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
        )

    def _split_generators(self, dl_manager):
        # The configuration selected by the user is available as self.config.name.
        # dl_manager accepts any (nested) structure of URLs and returns the same
        # structure with each URL replaced by a path to the local, cached file.
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=self.config.name,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={"filepath": data_dir},
            ),
        ]

    # Method parameters are unpacked from the `gen_kwargs` given in
    # `_split_generators`, so the signature must match those kwargs exactly.
    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            if self.config.name in ["corpus", "topics"]:
                for line in f:
                    fields = line.strip().split("\t")
                    idx = fields[0]
                    text = fields[1]
                    # Examples must be yielded as (key, example_dict) pairs.
                    yield idx, {"id": idx, "text": text}
            else:
                for line in f:
                    # Skip the header row, which contains "query-id".
                    if "query-id" not in line:
                        fields = line.strip().split("\t")
                        query_id = fields[0]
                        doc_id = fields[1]
                        rel = int(fields[2])
                        # Combine the two ids into a unique example key.
                        yield f"{query_id}-{doc_id}", {"query_id": query_id, "doc_id": doc_id, "rel": rel}
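

# A minimal usage sketch, assuming this script is the loading script of the
# leonardo-avila/fiqa_pt dataset repo (kept as comments so nothing runs on
# import; recent versions of `datasets` may also require trust_remote_code=True).
# Each configuration is exposed as a split of the same name:
#
#   import datasets
#
#   corpus = datasets.load_dataset("leonardo-avila/fiqa_pt", "corpus", split="corpus")
#   qrels = datasets.load_dataset("leonardo-avila/fiqa_pt", "qrel", split="qrel")
#   print(corpus[0])  # {"id": "...", "text": "..."}
#   print(qrels[0])   # {"query_id": "...", "doc_id": "...", "rel": ...}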