dbp15k-fr-en / dbp15k-fr-en.py
import json
import os
import pickle

import datasets
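
# Loading script for DBP15K FR-EN, a cross-lingual entity-alignment benchmark
# pairing the French and English editions of DBpedia. Three configurations are
# exposed: "source" (the French KG), "target" (the English KG), and "pairs"
# (the gold entity links between them).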
class Dbp15kFrEnConfig(datasets.BuilderConfig):
    def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
        super().__init__(version=datasets.Version("0.0.1"), **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url

class Dbp15kFrEn(datasets.GeneratorBasedBuilder):
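    """Entity-alignment dataset builder for DBP15K FR-EN.

    Each configuration downloads its own zip archive and exposes the raw
    alignment files as named splits (see `_split_generators`).
    """
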
BUILDER_CONFIGS = [
Dbp15kFrEnConfig(
name="source",
features=["column1", "column2", "column3"],
citation="TODO",
url="TODO",
data_url="https://huggingface.co/datasets/matchbench/dbp15k-fr-en/resolve/main/dbp15k-fr-en-src.zip"
),
Dbp15kFrEnConfig(
name="target",
features=["column1", "column2", "column3"],
citation="TODO",
url="TODO",
data_url="https://huggingface.co/datasets/matchbench/dbp15k-fr-en/resolve/main/dbp15k-fr-en-tgt.zip"
),
Dbp15kFrEnConfig(
name="pairs",
features=["left_id", "right_id"],
citation="TODO",
url="TODO",
data_url="https://huggingface.co/datasets/matchbench/dbp15k-fr-en/resolve/main/dbp15k-fr-en-pairs.zip"
),
]
def _info(self):
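        # Every configuration exposes string-valued columns: "source" and
        # "target" share the generic column1/column2/column3 schema, while
        # "pairs" uses left_id/right_id.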
        features = {feature: datasets.Value("string") for feature in self.config.features}
        return datasets.DatasetInfo(
            features=datasets.Features(features)
        )
def _split_generators(self, dl_manager):
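        # Each configuration maps the files inside its archive to named splits:
        # "source"/"target" expose per-KG id maps, triples, descriptions, and
        # translated names; "pairs" exposes the alignment links.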
        dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""
if self.config.name == "source":
return [
datasets.SplitGenerator(
name="ent_ids",
gen_kwargs={
"data_file": os.path.join(dl_dir, "ent_ids_1"),
"split": "ent_ids",
},
),
datasets.SplitGenerator(
name="rel_ids",
gen_kwargs={
"data_file": os.path.join(dl_dir, "rel_ids_1"),
"split": "rel_ids",
},
),
datasets.SplitGenerator(
name="attr_triples",
gen_kwargs={
"data_file": os.path.join(dl_dir, "att_triples_1"),
"split": "attr_triples",
},
),
datasets.SplitGenerator(
name="rel_triples",
gen_kwargs={
"data_file": os.path.join(dl_dir, "triples_1"),
"split": "rel_triples",
},
),
datasets.SplitGenerator(
name="description",
gen_kwargs={
"data_file": os.path.join(dl_dir, "description1.pkl"),
"split": "description",
},
),
datasets.SplitGenerator(
name="rel_triples_whole",
gen_kwargs={
"data_file": os.path.join(dl_dir, "rel_triples_whole_1"),
"split": "rel_triples_whole",
},
),
datasets.SplitGenerator(
name="attr_triples_whole",
gen_kwargs={
"data_file": os.path.join(dl_dir, "att_triples_whole_1"),
"split": "attr_triples_whole",
},
),
datasets.SplitGenerator(
name="translated_name",
gen_kwargs={
"data_file": os.path.join(dl_dir, "translated_name.json"),
"split": "translated_name",
},
),
]
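        # The "target" (English) side mirrors the source layout, drawing from
        # the *_2 files in its archive.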
elif self.config.name == "target":
return [
datasets.SplitGenerator(
name="ent_ids",
gen_kwargs={
"data_file": os.path.join(dl_dir, "ent_ids_2"),
"split": "ent_ids",
},
),
datasets.SplitGenerator(
name="rel_ids",
gen_kwargs={
"data_file": os.path.join(dl_dir, "rel_ids_2"),
"split": "rel_ids",
},
),
datasets.SplitGenerator(
name="attr_triples",
gen_kwargs={
"data_file": os.path.join(dl_dir, "att_triples_2"),
"split": "attr_triples",
},
),
datasets.SplitGenerator(
name="rel_triples",
gen_kwargs={
"data_file": os.path.join(dl_dir, "triples_2"),
"split": "rel_triples",
},
),
datasets.SplitGenerator(
name="description",
gen_kwargs={
"data_file": os.path.join(dl_dir, "description2.pkl"),
"split": "description",
},
),
datasets.SplitGenerator(
name="rel_triples_whole",
gen_kwargs={
"data_file": os.path.join(dl_dir, "rel_triples_whole_2"),
"split": "rel_triples_whole",
},
),
datasets.SplitGenerator(
name="attr_triples_whole",
gen_kwargs={
"data_file": os.path.join(dl_dir, "att_triples_whole_2"),
"split": "attr_triples_whole",
},
),
datasets.SplitGenerator(
name="translated_name",
gen_kwargs={
"data_file": os.path.join(dl_dir, "translated_name.json"),
"split": "translated_name",
},
),
]
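        # Alignment links: train/valid/test splits, plus sup_pairs/ref_pairs,
        # which appear to follow the seed ("sup") vs. reference ("ref") pair
        # naming convention common in earlier entity-alignment codebases.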
elif self.config.name == "pairs":
return [
datasets.SplitGenerator(
name="train",
gen_kwargs={
"data_file": os.path.join(dl_dir, "train_links"),
"split": "train",
},
),
datasets.SplitGenerator(
name="valid",
gen_kwargs={
"data_file": os.path.join(dl_dir, "valid_links"),
"split": "valid",
},
),
datasets.SplitGenerator(
name="test",
gen_kwargs={
"data_file": os.path.join(dl_dir, "test_links"),
"split": "test",
},
),
datasets.SplitGenerator(
name="sup",
gen_kwargs={
"data_file": os.path.join(dl_dir, "sup_pairs"),
"split": "sup",
},
),
datasets.SplitGenerator(
name="ref",
gen_kwargs={
"data_file": os.path.join(dl_dir, "ref_pairs"),
"split": "ref",
},
),
]
def _generate_examples(self, data_file, split):
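        # Three on-disk formats are handled: pickled dicts (descriptions),
        # JSON lists (translated names), and line-oriented text tables
        # (id maps, triples, and alignment links).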
        if split == "description":
            # Pickled dict mapping each entity to its textual description.
            with open(data_file, "rb") as f:
                des = pickle.load(f)
            for i, (ent, ori_des) in enumerate(des.items()):
                yield i, {
                    "column1": ent,
                    "column2": ori_des,
                    "column3": None
                }
        elif split == "translated_name":
            # JSON list of (entity, translated name) pairs.
            with open(data_file, "r", encoding="utf-8") as f:
                trans = json.load(f)
            for i, pair in enumerate(trans):
                yield i, {
                    "column1": str(pair[0]),
                    "column2": str(pair[1]),
                    "column3": None
                }
        else:
            with open(data_file, "r", encoding="utf-8") as f:
                data = f.readlines()
            for i, line in enumerate(data):
                if self.config.name in ["source", "target"]:
                    if split in ["ent_ids", "rel_ids"]:
                        # Two tab-separated columns: numeric id, URI.
                        row = line.strip("\n").split("\t")
                        yield i, {
                            "column1": row[0],
                            "column2": row[1],
                            "column3": None
                        }
                    elif split in ["rel_triples", "rel_triples_whole"]:
                        # Three tab-separated columns: head, relation, tail.
                        row = line.strip("\n").split("\t")
                        yield i, {
                            "column1": row[0],
                            "column2": row[1],
                            "column3": row[2]
                        }
                    elif split in ["attr_triples", "attr_triples_whole"]:
                        # Space-separated; split at most twice so attribute
                        # values that contain spaces stay intact.
                        row = line.rstrip("\n").split(" ", 2)
                        yield i, {
                            "column1": row[0],
                            "column2": row[1],
                            "column3": row[2]
                        }
                elif self.config.name == "pairs":
                    # Tab-separated alignment link: source id, target id.
                    row = line.strip("\n").split("\t")
                    yield i, {
                        "left_id": row[0],
                        "right_id": row[1]
                    }
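
# A minimal usage sketch, assuming this script is hosted as the loading script
# of the matchbench/dbp15k-fr-en repository on the Hugging Face Hub (the repo
# id is taken from the data URLs above); adjust the path if loading locally.
#
#   import datasets
#
#   # French-side entity id map ("ent_ids" split of the "source" config).
#   src = datasets.load_dataset("matchbench/dbp15k-fr-en", "source", split="ent_ids")
#
#   # Training alignment links from the "pairs" config.
#   pairs = datasets.load_dataset("matchbench/dbp15k-fr-en", "pairs", split="train")
#   print(pairs[0])  # {"left_id": ..., "right_id": ...}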