"""Hugging Face `datasets` loading script for the DWY100K DBpedia-Wikidata
(d-w) entity-alignment benchmark.

Three configurations are exposed:
  * ``source`` / ``target`` — entity-id maps and relation triples of each KG,
  * ``pairs``               — aligned entity-id pairs, split into
                              train / valid / test / sup / ref.
"""

import json
import os
import pickle  # NOTE(review): unused in this script — kept to preserve the import surface

import datasets

# Single zip archive containing every raw file used by all three configs.
_DATA_URL = "https://huggingface.co/datasets/matchbench/dwy100k-d-w/resolve/main/dwy-dbp-wd-100k.zip"


class Dwy100kDWConfig(datasets.BuilderConfig):
    """BuilderConfig carrying the column names and download URL for one view
    of the dataset.

    Args:
        features: list of column names this config exposes.
        data_url: URL of the zip archive with the raw files.
        citation: citation string (currently a TODO placeholder upstream).
        url: homepage URL (currently a TODO placeholder upstream).
        label_classes: kept for API compatibility; not used by this builder.
    """

    def __init__(self, features, data_url, citation, url,
                 label_classes=("False", "True"), **kwargs):
        # Pin a fixed dataset version; remaining kwargs (e.g. name) go to the base class.
        super(Dwy100kDWConfig, self).__init__(
            version=datasets.Version("0.0.1"), **kwargs
        )
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url


class Dwy100kDW(datasets.GeneratorBasedBuilder):
    """Generator-based builder for the DWY100K d-w benchmark."""

    BUILDER_CONFIGS = [
        Dwy100kDWConfig(
            name="source",
            features=["column1", "column2", "column3"],
            citation="TODO",
            url="TODO",
            data_url=_DATA_URL,
        ),
        Dwy100kDWConfig(
            name="target",
            features=["column1", "column2", "column3"],
            citation="TODO",
            url="TODO",
            data_url=_DATA_URL,
        ),
        Dwy100kDWConfig(
            name="pairs",
            features=["left_id", "right_id"],
            citation="TODO",
            url="TODO",
            data_url=_DATA_URL,
        ),
    ]

    def _info(self):
        # Every config uses string-valued columns; only the column *names*
        # differ, and those come straight from the config. (The original
        # three identical if/elif branches also left `features` unbound for
        # an unknown config name — this form works uniformly.)
        features = {
            feature: datasets.Value("string") for feature in self.config.features
        }
        return datasets.DatasetInfo(features=datasets.Features(features))

    def _split_generators(self, dl_manager):
        dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""

        def split(name, file_name):
            # One SplitGenerator per raw file inside the extracted archive.
            return datasets.SplitGenerator(
                name=name,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, file_name),
                    "split": name,
                },
            )

        if self.config.name == "source":
            return [
                split("ent_ids", "ent_ids_1"),
                split("rel_triples_id", "triples_1"),
            ]
        if self.config.name == "target":
            return [
                split("ent_ids", "ent_ids_2"),
                split("rel_triples_id", "triples_2"),
            ]
        if self.config.name == "pairs":
            # NOTE(review): "test" and "ref" intentionally read the same file
            # ("ref_ent_ids"), matching the original script — confirm upstream.
            return [
                split("train", "train_ent_ids"),
                split("valid", "valid_ent_ids"),
                split("test", "ref_ent_ids"),
                split("sup", "sup_ent_ids"),
                split("ref", "ref_ent_ids"),
            ]

    def _generate_examples(self, data_file, split):
        """Yield ``(key, example)`` pairs for one raw file.

        ``split`` selects the per-line parsing rule; ``self.config.name``
        selects the column schema of the emitted examples.
        """
        if split == "translated_name":
            # JSON file: a list of [name, translated_name] pairs.
            with open(data_file, "r") as fp:  # was left unclosed originally
                trans = json.load(fp)
            for i, pair in enumerate(trans):
                yield i, {
                    "column1": str(pair[0]),
                    "column2": str(pair[1]),
                    "column3": None,
                }
            return

        # All remaining splits are tab-separated text files, one record per line.
        with open(data_file, "r", encoding="utf-8") as fp:  # was left unclosed originally
            for i, line in enumerate(fp):
                row = line.rstrip("\n").split("\t")
                if self.config.name in ("source", "target"):
                    if split in ("ent_ids", "rel_ids"):
                        # Two-column id/name map; pad the third column.
                        yield i, {
                            "column1": row[0],
                            "column2": row[1],
                            "column3": None,
                        }
                    elif split in (
                        "rel_triples_id",
                        "rel_triples_whole",
                        "rel_triples_name",
                        "attr_triples",
                    ):
                        # Three-column triple (head, relation, tail).
                        yield i, {
                            "column1": row[0],
                            "column2": row[1],
                            "column3": row[2],
                        }
                elif self.config.name == "pairs":
                    # Aligned entity-id pair (left KG id, right KG id).
                    yield i, {"left_id": row[0], "right_id": row[1]}