|
"""PRES retrieval dataset""" |
|
|
|
|
|
import json |
|
import csv |
|
|
|
import os |
|
|
|
import datasets |
|
|
|
_DESCRIPTION = 'Reference: https://mklab.iti.gr/results/spanish-passage-retrieval-dataset/' |
|
|
|
_HOMEPAGE_URL = 'https://mklab.iti.gr/results/spanish-passage-retrieval-dataset/' |
|
_LANGUAGES = {'es': 'ES'} |
|
_VERSION = '1.0.0' |
|
|
|
|
|
URL = 'https://huggingface.co/datasets/jinaai/spanish_passage_retrieval/resolve/main/' |
|
|
|
|
|
class PRESConfig(datasets.BuilderConfig):
    """BuilderConfig for PRES; pins every config to the dataset _VERSION."""

    def __init__(self, **kwargs):
        # Bug fix: the original statement ended with a stray trailing comma,
        # silently wrapping the super().__init__ call in a throwaway 1-tuple.
        super().__init__(version=datasets.Version(_VERSION, ''), **kwargs)
|
|
|
|
|
class PRES(datasets.GeneratorBasedBuilder):
    """The Spanish Passage Retrieval dataset (PRES).

    Every config yields ``(key, {'_id': str, 'text': str})`` examples:

    - ``corpus.sentences``: de-duplicated annotated passages
    - ``corpus.documents``: full documents
    - ``queries``: query texts keyed by query id
    - ``qrels.s2s`` / ``qrels.s2p``: per query, the space-joined ids of
      relevant passages / documents respectively
    """

    BUILDER_CONFIGS = [
        # Consistency fix: use PRESConfig (as declared by BUILDER_CONFIG_CLASS
        # below) instead of the plain BuilderConfig, so each config actually
        # carries the dataset version pinned by PRESConfig.
        PRESConfig(
            name=name,
            description=f'{name.title()} of the Spanish Passage Retrieval dataset.',
        )
        for name in ['corpus.sentences', 'corpus.documents', 'queries', 'qrels.s2s', 'qrels.s2p']
    ]

    BUILDER_CONFIG_CLASS = PRESConfig

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Parsed-example cache shared by all configs; built lazily on the
        # first call to _generate_examples.
        self._data = None

    def _info(self):
        """Dataset metadata: every config produces {'_id', 'text'} records."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
        )

    def _split_generators(self, dl_manager):
        """Download the three raw JSON files and expose a single TEST split.

        Bug fix: the original passed no gen_kwargs and later tried to
        ``open()`` the remote URL directly, which cannot work for an
        ``https://`` location. Downloading here hands local paths to
        _generate_examples.
        """
        paths = dl_manager.download(
            {
                'docs': URL + 'docs.json',
                'topics': URL + 'topics.json',
                'rel_passages': URL + 'relevance_passages.json',
            }
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={'paths': paths},
            ),
        ]

    def _build_data(self, paths):
        """Parse the raw JSON files into per-config example collections.

        ``paths`` maps 'docs' / 'topics' / 'rel_passages' to local JSON files.
        Returns the dict stored in ``self._data``.
        """
        with open(paths['docs'], encoding='utf-8') as f:
            docs = json.load(f)
        with open(paths['topics'], encoding='utf-8') as f:
            topics = json.load(f)
        with open(paths['rel_passages'], encoding='utf-8') as f:
            rel_passages = json.load(f)

        corpus_sentences = []
        corpus_documents = []
        queries = dict()
        qrels_s2s = dict()
        qrels_s2p = dict()

        # Map each topic number to the ids of its queries: relevance is
        # annotated per topic, and every query of a topic inherits it.
        topic_to_queries = dict()
        for topic in topics['topics']:
            topic_to_queries[topic['number']] = []
            for query in topic['queries']:
                qid = query['number']
                queries[qid] = query['text']
                topic_to_queries[topic['number']].append(qid)
                qrels_s2s[qid] = []
                qrels_s2p[qid] = []

        known_passage_ids = set()
        for annotated_topic in rel_passages['topics']:
            topic = annotated_topic['number']
            for annotation in annotated_topic['annotations']:
                passage_id = f'doc_{annotation["docNo"]}_{annotation["start"]}_{annotation["end"]}'
                doc_id = f'doc_{annotation["docNo"]}'
                # A passage can be annotated under several topics; emit each
                # passage into the corpus only once.
                if passage_id not in known_passage_ids:
                    corpus_sentences.append({'_id': passage_id, 'text': annotation['text']})
                    known_passage_ids.add(passage_id)
                for qid in topic_to_queries[topic]:
                    qrels_s2s[qid].append(passage_id)
                    qrels_s2p[qid].append(doc_id)

        for doc in docs['documents']:
            corpus_documents.append({'_id': f'doc_{doc["docNo"]}', 'text': doc['text']})

        return {
            'corpus.sentences': corpus_sentences,
            'corpus.documents': corpus_documents,
            'queries': queries,
            'qrels.s2s': qrels_s2s,
            'qrels.s2p': qrels_s2p,
        }

    def _generate_examples(self, paths=None, split: str = None):
        """Yield ``(key, {'_id', 'text'})`` examples for the active config.

        Args:
            paths: mapping of 'docs' / 'topics' / 'rel_passages' to local
                JSON files, supplied by _split_generators. When omitted,
                falls back to joining the file names onto ``URL`` as the
                original code did (kept for backward compatibility).
            split: unused; retained from the original signature.

        Raises:
            ValueError: if ``self.config.name`` is not a known config.
        """
        if self._data is None:
            if paths is None:
                # NOTE(review): open() cannot read an https:// URL — this
                # fallback only works if URL points at a local directory.
                paths = {
                    'docs': os.path.join(URL, 'docs.json'),
                    'topics': os.path.join(URL, 'topics.json'),
                    'rel_passages': os.path.join(URL, 'relevance_passages.json'),
                }
            self._data = self._build_data(paths)

        if self.config.name not in self._data:
            raise ValueError(f'Unknown config name: {self.config.name}')

        if self.config.name.startswith('corpus'):
            for record in self._data[self.config.name]:
                yield record['_id'], record
        elif self.config.name == 'queries':
            for qid, text in self._data['queries'].items():
                yield qid, {
                    "_id": qid,
                    "text": text,
                }
        elif self.config.name.startswith('qrels'):
            # Each qrels example packs all relevant ids for one query into a
            # single space-joined string.
            for qid, dids in self._data[self.config.name].items():
                yield qid, {
                    "_id": qid,
                    "text": ' '.join(dids),
                }
|
|