bordirlines / bordirlines.py
import json
from functools import lru_cache

import datasets
import pandas as pd
SUPPORTED_LANGUAGES = [
"sl",
"ur",
"sw",
"uz",
"vi",
"sq",
"ms",
"km",
"hy",
"da",
"ky",
"mg",
"mn",
"ja",
"el",
"it",
"is",
"ru",
"tl",
"so",
"pt",
"uk",
"sr",
"sn",
"ht",
"bs",
"my",
"ar",
"hr",
"nl",
"bn",
"ne",
"hi",
"ka",
"az",
"ko",
"id",
"fr",
"es",
"en",
"fa",
"lo",
"iw",
"th",
"tr",
"zht",
"zhs",
"ti",
"tg",
"control",
]
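# "control" is a pseudo-language config: it reuses the English documents
# (see lang_docs in _split_generators) while reading its own query-hits file.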
SYSTEMS = ["openai"]
MODES = ["qlang", "qlang_en", "en", "rel_langs"]
# Each (system, mode) pair becomes one dataset split source, e.g. "openai.qlang".
# SUPPORTED_SOURCES = [f"{system}.{mode}" for system in SYSTEMS for mode in MODES]
ROOT_DIR = "data"
class BordIRlinesConfig(datasets.BuilderConfig):
def __init__(self, language, n_hits=10, **kwargs):
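        # n_hits caps how many retrieved documents are kept per query in
        # _generate_examples; a falsy value (e.g. None) keeps all hits.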
super(BordIRlinesConfig, self).__init__(**kwargs)
self.language = language
self.n_hits = n_hits
self.data_root_dir = ROOT_DIR
@lru_cache
def load_json(path):
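    # Cached so repeated lookups of the same docs file reuse the parsed JSON.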
with open(path, "r", encoding="utf-8") as f:
return json.load(f)
@lru_cache
def replace_lang_str(path, lang):
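    # e.g. replace_lang_str("data/sl/sl_docs.json", "fr") -> "data/fr/fr_docs.json"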
parent = path.rsplit("/", 2)[0]
return f"{parent}/{lang}/{lang}_docs.json"
class BordIRLinesDataset(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
BordIRlinesConfig(
name=lang,
language=lang,
description=f"{lang.upper()} dataset",
)
for lang in SUPPORTED_LANGUAGES
]
def _info(self):
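        # Each example is one retrieved hit: a (query, territory) pair plus the
        # ranked document's id, text, and language.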
return datasets.DatasetInfo(
description="IR Dataset for BordIRLines paper.",
features=datasets.Features(
{
"query_id": datasets.Value("string"),
"query": datasets.Value("string"),
"territory": datasets.Value("string"),
"rank": datasets.Value("int32"),
"score": datasets.Value("float32"),
"doc_id": datasets.Value("string"),
"doc_text": datasets.Value("string"),
"doc_lang": datasets.Value("string"),
}
),
)
def _split_generators(self, dl_manager):
base_url = self.config.data_root_dir
downloaded_queries = dl_manager.download_and_extract(
{
"queries": f"{base_url}/queries.tsv",
}
)
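        # queries.tsv maps each query_id to its query_text.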
queries_df = pd.read_csv(downloaded_queries["queries"], sep="\t")
lang = self.config.language
splits = []
downloaded_data = {}
lang_docs = lang if lang != "control" else "en"
        # One split per (system, mode) source, e.g. "openai.qlang".
        for system in SYSTEMS:
            for mode in MODES:
                source = f"{system}.{mode}"
downloaded_data[source] = dl_manager.download_and_extract(
{
"docs": f"{base_url}/{lang_docs}/{lang_docs}_docs.json",
"hits": f"{base_url}/{lang}/{system}/{mode}/{lang}_query_hits.tsv",
}
)
                split = datasets.SplitGenerator(
                    name=source,
gen_kwargs={
"downloaded_data": downloaded_data[source],
"queries_df": queries_df,
},
)
splits.append(split)
return splits
def _generate_examples(self, downloaded_data, queries_df):
n_hits = self.config.n_hits
query_map = dict(zip(queries_df["query_id"], queries_df["query_text"]))
counter = 0
docs_path = downloaded_data["docs"]
hits_path = downloaded_data["hits"]
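        # The docs filename follows the pattern "{lang}_docs.json", so the docs
        # language can be recovered from the path.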
curr_lang = docs_path.split("/")[-1].split("_")[0]
docs = load_json(docs_path)
hits = pd.read_csv(hits_path, sep="\t")
if n_hits:
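            # Keep the first n_hits rows per query (the top-ranked hits,
            # assuming the hits TSV is sorted by rank within each query).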
hits = hits.groupby("query_id").head(n_hits)
        for _, row in hits.iterrows():
            doc_id = row["doc_id"]
            # Cross-lingual fallback: if the doc is missing from the current
            # language's docs file, load the docs file for the hit's own language.
            if doc_id not in docs and curr_lang != row["doc_lang"]:
                docs_path_local = replace_lang_str(docs_path, row["doc_lang"])
                docs_local = load_json(docs_path_local)
            else:
                docs_local = docs
query_id = row["query_id"]
query_text = query_map.get(query_id, "")
yield (
counter,
{
"query_id": query_id,
"query": query_text,
"territory": row["territory"],
"rank": row["rank"],
"score": row["score"],
"doc_id": doc_id,
"doc_text": docs_local[doc_id],
"doc_lang": row["doc_lang"],
},
)
counter += 1
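

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the loader). The repo id
# "manestay/bordirlines" is an assumption based on this file's location;
# point load_dataset at wherever this script is actually hosted.
if __name__ == "__main__":
    ds = datasets.load_dataset(
        "manestay/bordirlines",
        "sl",  # one config per language in SUPPORTED_LANGUAGES
        split="openai.qlang",  # splits are named "{system}.{mode}"
        trust_remote_code=True,
    )
    print(ds[0])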