# selfkg-dwy100k-dbpyg.py
import os
import pickle
import datasets
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """\
DWY100K-yg is a large-scale monolingual entity-alignment dataset extracted from DBpedia and YAGO3.
The suffix "yg" stands for DBpedia-to-YAGO3; the dataset provides 100,000 reference entity alignments.
"""
_CITATION = """\
@inproceedings{sun2018bootstrapping,
  title={Bootstrapping Entity Alignment with Knowledge Graph Embedding.},
  author={Sun, Zequn and Hu, Wei and Zhang, Qingheng and Qu, Yuzhong},
  booktitle={IJCAI},
  volume={18},
  pages={4396--4402},
  year={2018}
}
"""
_URL = "https://dl.acm.org/doi/10.1145/3485447.3511945"
# All three configurations ("source", "target", "pairs") are served from a
# single consolidated archive on the Hugging Face Hub.
_PREFIX = "https://huggingface.co/datasets/matchbench/selfkg-dwy100k-dbpyg"
_DATA_URL = f"{_PREFIX}/resolve/main/selfkg-dwy100k-dbpyg.zip"
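# A minimal usage sketch (assuming the `datasets` library is installed and the
# Hub repository above is reachable; split names follow _split_generators below):
#
#     import datasets
#     source = datasets.load_dataset(
#         "matchbench/selfkg-dwy100k-dbpyg", "source", split="ent_ids"
#     )
#     train_pairs = datasets.load_dataset(
#         "matchbench/selfkg-dwy100k-dbpyg", "pairs", split="train"
#     )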
class SelfkgDwy100kygConfig(datasets.BuilderConfig):
    """BuilderConfig for SelfKG DWY100K-yg."""

    def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
        """
        Args:
            features: list of column names for this configuration.
            data_url: URL of the archive containing the data files.
            citation: BibTeX citation for the dataset.
            url: homepage of the dataset.
            label_classes: possible label values (unused for this dataset).
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url

class DWY100kYg(datasets.GeneratorBasedBuilder):
    """DWY100K-yg: an entity-alignment dataset from DBpedia to YAGO3."""

    BUILDER_CONFIGS = [
        SelfkgDwy100kygConfig(
            name="source",
            features=["column1", "column2", "column3"],
            citation=_CITATION,
            url=_URL,
            data_url=_DATA_URL,
        ),
        SelfkgDwy100kygConfig(
            name="target",
            features=["column1", "column2", "column3"],
            citation=_CITATION,
            url=_URL,
            data_url=_DATA_URL,
        ),
        SelfkgDwy100kygConfig(
            name="pairs",
            features=["left_id", "right_id"],
            citation=_CITATION,
            url=_URL,
            data_url=_DATA_URL,
        ),
    ]
    def _info(self) -> datasets.DatasetInfo:
        # "source" and "target" share a three-column string schema; "pairs"
        # holds integer id pairs.
        if self.config.name in ("source", "target"):
            features = {feature: datasets.Value("string") for feature in self.config.features}
        elif self.config.name == "pairs":
            features = {feature: datasets.Value("int32") for feature in self.config.features}
        else:
            raise ValueError(f"Unknown configuration name: {self.config.name}")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=_URL,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        dl_dir = dl_manager.download_and_extract(self.config.data_url)
        if self.config.name == "source":
            return [
                datasets.SplitGenerator(
                    name="ent_ids",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "id_ent_1"),
                        "split": "ent_ids",
                    },
                ),
                datasets.SplitGenerator(
                    name="rel_triples_id",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "triples_1"),
                        "split": "rel_triples_id",
                    },
                ),
                datasets.SplitGenerator(
                    name="LaBSE_emb",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "raw_LaBSE_emb_1.pkl"),
                        "split": "LaBSE_emb",
                    },
                ),
            ]
        elif self.config.name == "target":
            return [
                datasets.SplitGenerator(
                    name="ent_ids",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "id_ent_2"),
                        "split": "ent_ids",
                    },
                ),
                datasets.SplitGenerator(
                    name="rel_triples_id",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "triples_2"),
                        "split": "rel_triples_id",
                    },
                ),
                datasets.SplitGenerator(
                    name="LaBSE_emb",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "raw_LaBSE_emb_2.pkl"),
                        "split": "LaBSE_emb",
                    },
                ),
            ]
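        # Note: in the "pairs" configuration below, the "train" and "test"
        # splits both read the full reference file ("ref_ent_ids"); only
        # "valid" points at a separate held-out file ("valid.ref").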
        elif self.config.name == "pairs":
            return [
                datasets.SplitGenerator(
                    name="train",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "ref_ent_ids"),
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name="valid",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "valid.ref"),
                        "split": "valid",
                    },
                ),
                datasets.SplitGenerator(
                    name="test",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "ref_ent_ids"),
                        "split": "test",
                    },
                ),
            ]
    def _generate_examples(self, data_file, split):
        if split == "LaBSE_emb":
            # The pickle maps entity ids to their raw LaBSE embeddings.
            with open(data_file, "rb") as f:
                des = pickle.load(f)
            for i, (ent_id, ori_emb) in enumerate(des.items()):
                yield i, {
                    "column1": ent_id,
                    "column2": ori_emb,
                    "column3": None,
                }
        else:
            # The remaining splits are tab-separated text files.
            with open(data_file, "r", encoding="utf-8") as f:
                for i, line in enumerate(f):
                    row = line.strip("\n").split("\t")
                    if self.config.name in ["source", "target"]:
                        if split == "ent_ids":
                            # Each line: <entity id>\t<entity URI>.
                            yield i, {
                                "column1": row[0],
                                "column2": row[1],
                                "column3": None,
                            }
                        elif split == "rel_triples_id":
                            # Each line: <head id>\t<relation id>\t<tail id>.
                            yield i, {
                                "column1": row[0],
                                "column2": row[1],
                                "column3": row[2],
                            }
                    elif self.config.name == "pairs":
                        # Each line: <left id>\t<right id>; cast to int to
                        # match the int32 schema declared in _info().
                        yield i, {
                            "left_id": int(row[0]),
                            "right_id": int(row[1]),
                        }
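
# A minimal consumption sketch (hypothetical, for illustration): join the
# "pairs" ids back to the entity table loaded from the "source" configuration.
#
#     pairs = datasets.load_dataset(
#         "matchbench/selfkg-dwy100k-dbpyg", "pairs", split="train"
#     )
#     src = datasets.load_dataset(
#         "matchbench/selfkg-dwy100k-dbpyg", "source", split="ent_ids"
#     )
#     src_names = {int(ex["column1"]): ex["column2"] for ex in src}
#     for ex in pairs.select(range(3)):
#         print(src_names[ex["left_id"]], "<->", ex["right_id"])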