"""HuggingFace `datasets` loading script for the BordIRLines IR dataset."""

import json

import datasets
import pandas as pd

SUPPORTED_LANGUAGES = [
    "sl", "ur", "sw", "uz", "vi", "sq", "ms", "km", "hy", "da",
    "ky", "mg", "mn", "ja", "el", "it", "is", "ru", "tl", "so",
    "pt", "uk", "sr", "sn", "ht", "bs", "my", "ar", "hr", "nl",
    "bn", "ne", "hi", "ka", "az", "ko", "id", "fr", "es", "en",
    "fa", "lo", "iw", "th", "tr", "zht", "zhs", "ti", "tg",
    "control",
]

SUPPORTED_SOURCES = ["llm"]

ROOT_DIR = "data"


class LangConfig(datasets.BuilderConfig):
    def __init__(self, language, source="all", n_hits=10, **kwargs):
        super(LangConfig, self).__init__(**kwargs)
        # TODO: support specifying multiple languages
        if language == "all":
            languages = SUPPORTED_LANGUAGES
        else:
            languages = [language]
        self.languages = languages
        self.source = source
        self.n_hits = n_hits
        self.data_root_dir = ROOT_DIR


class BordIRLinesDataset(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        LangConfig(name=lang, language=lang, description=f"{lang.upper()} dataset")
        for lang in SUPPORTED_LANGUAGES
    ] + [LangConfig(name="all", language="all", description="All languages dataset")]

    def _info(self):
        return datasets.DatasetInfo(
            description="IR Dataset for BordIRLines paper.",
            features=datasets.Features(
                {
                    "query_id": datasets.Value("string"),
                    "query": datasets.Value("string"),
                    "territory": datasets.Value("string"),
                    "rank": datasets.Value("int32"),
                    "score": datasets.Value("float32"),
                    "doc_id": datasets.Value("string"),
                    "doc_text": datasets.Value("string"),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        base_url = self.config.data_root_dir
        downloaded_queries = dl_manager.download_and_extract(
            {
                "queries": f"{base_url}/queries.tsv",
            }
        )

        languages_to_download = self.config.languages
        sources_to_download = (
            SUPPORTED_SOURCES if self.config.source == "all" else [self.config.source]
        )

        splits = []
        for lang in languages_to_download:
            downloaded_data = {}
            for source in sources_to_download:
                # The "control" split has no document collection of its own;
                # it reuses the English docs (but keeps its own query hits).
                lang_docs = lang if lang != "control" else "en"
                downloaded_data[source] = dl_manager.download_and_extract(
                    {
                        "docs": f"{base_url}/{lang_docs}/{source}/{lang_docs}_docs.json",
                        "hits": f"{base_url}/{lang}/{source}/{lang}_query_hits.tsv",
                    }
                )
            split = datasets.SplitGenerator(
                name=lang,
                gen_kwargs={
                    "language": lang,
                    "downloaded_data": downloaded_data,
                    "queries_path": downloaded_queries["queries"],
                },
            )
            splits.append(split)

        return splits

    def _generate_examples(self, language, downloaded_data, queries_path):
        source = self.config.source
        n_hits = self.config.n_hits

        if language not in SUPPORTED_LANGUAGES:
            raise ValueError(
                f"Language {language} is not supported. Supported languages: {SUPPORTED_LANGUAGES}"
            )
        if source not in SUPPORTED_SOURCES and source != "all":
            raise ValueError(
                f"Source {source} is not supported. Supported sources: {SUPPORTED_SOURCES}"
            )

        queries_df = pd.read_csv(queries_path, sep="\t")
        query_map = dict(zip(queries_df["query_id"], queries_df["query_text"]))

        counter = 0
        for src in downloaded_data:
            if source != "all" and src != source:
                continue
            docs_path = downloaded_data[src]["docs"]
            hits_path = downloaded_data[src]["hits"]

            with open(docs_path, "r", encoding="utf-8") as f:
                docs = json.load(f)

            hits = pd.read_csv(hits_path, sep="\t")
            if n_hits:
                # Keep only the top n_hits rows per query.
                hits = hits.groupby("query_id").head(n_hits)

            for _, row in hits.iterrows():
                doc_id = row["doc_id"]
                # Skip hits whose document is missing from the collection.
                if doc_id in docs:
                    query_id = row["query_id"]
                    query_text = query_map.get(query_id, "")
                    yield (
                        counter,
                        {
                            "query_id": query_id,
                            "query": query_text,
                            "territory": row["territory"],
                            "rank": row["rank"],
                            "score": row["score"],
                            "doc_id": doc_id,
                            "doc_text": docs[doc_id],
                        },
                    )
                    counter += 1
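
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the loader). This assumes the script
# is loaded by path with the `datasets` library; the file name below is a
# hypothetical placeholder, and extra keyword arguments such as `n_hits` are
# forwarded to LangConfig. Newer versions of `datasets` may also require
# `trust_remote_code=True` when loading script-based datasets.
#
#     import datasets
#
#     ds = datasets.load_dataset("path/to/bordirlines.py", name="en", n_hits=5)
#     for example in ds["en"]:  # splits are named after languages
#         print(example["query_id"], example["rank"], example["doc_id"])
#         break
# ---------------------------------------------------------------------------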