HumorRP committed on
Commit
b0d91ee
1 Parent(s): 20d2b20

Upload selfkg-dwy100k-dbpyg.py

Files changed (1)
  1. selfkg-dwy100k-dbpyg.py +194 -0
selfkg-dwy100k-dbpyg.py ADDED
import json
import os
import pickle

import datasets

logger = datasets.logging.get_logger(__name__)

_VERSION = "1.0.0"

_DESCRIPTION = """\
DWY100k-yg is a large-scale monolingual dataset extracted from DBpedia and YAGO3. The suffix "yg" means DBpedia
to YAGO3. DWY100k-yg has 100,000 reference entity alignments.
"""

_CITATION = """\
@inproceedings{sun2018bootstrapping,
  title={Bootstrapping Entity Alignment with Knowledge Graph Embedding.},
  author={Sun, Zequn and Hu, Wei and Zhang, Qingheng and Qu, Yuzhong},
  booktitle={IJCAI},
  volume={18},
  pages={4396--4402},
  year={2018}
}
"""

_URL = "https://dl.acm.org/doi/10.1145/3485447.3511945"

# All three configs are served from a single archive (see data_url below);
# the per-config zips are kept here for reference only.
# _SUBFIELD = "yg"
# _PREFIX = "https://huggingface.co/datasets/matchbench/selfkg-dwy100k-dbpyg"
# _URLS = {
#     "source": f"{_PREFIX}/resolve/main/selfkg-dwy100k-dbp{_SUBFIELD}-src.zip",
#     "target": f"{_PREFIX}/resolve/main/selfkg-dwy100k-dbp{_SUBFIELD}-tgt.zip",
#     "pairs": f"{_PREFIX}/resolve/main/selfkg-dwy100k-dbp{_SUBFIELD}-pairs.zip",
# }

class SelfkgDwy100kygConfig(datasets.BuilderConfig):
    """BuilderConfig for SelfKG-DWY100k-yg."""

    def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
        """
        Args:
            features: column names of this config's features.
            data_url: URL of the archive that holds the raw data files.
            citation: citation for the dataset.
            url: homepage of the dataset.
            label_classes: names of the label classes.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url

class DWY100kYg(datasets.GeneratorBasedBuilder):
    """DWY100k-yg: an entity alignment dataset, from DBpedia to YAGO3."""

    BUILDER_CONFIGS = [
        SelfkgDwy100kygConfig(
            name="source",
            version=datasets.Version(_VERSION),
            features=["column1", "column2", "column3"],
            citation=_CITATION,
            url=_URL,
            data_url="https://huggingface.co/datasets/matchbench/selfkg-dwy100k-dbpyg/resolve/main/selfkg-dwy100k-dbpyg.zip",
        ),
        SelfkgDwy100kygConfig(
            name="target",
            version=datasets.Version(_VERSION),
            features=["column1", "column2", "column3"],
            citation=_CITATION,
            url=_URL,
            data_url="https://huggingface.co/datasets/matchbench/selfkg-dwy100k-dbpyg/resolve/main/selfkg-dwy100k-dbpyg.zip",
        ),
        SelfkgDwy100kygConfig(
            name="pairs",
            version=datasets.Version(_VERSION),
            features=["left_id", "right_id"],
            citation=_CITATION,
            url=_URL,
            data_url="https://huggingface.co/datasets/matchbench/selfkg-dwy100k-dbpyg/resolve/main/selfkg-dwy100k-dbpyg.zip",
        ),
    ]

    def _info(self) -> datasets.DatasetInfo:
        if self.config.name in ("source", "target"):
            # Entity-id and triple files hold tab-separated string columns.
            features = {feature: datasets.Value("string") for feature in self.config.features}
        elif self.config.name == "pairs":
            # Alignment pairs are integer entity ids.
            features = {feature: datasets.Value("int32") for feature in self.config.features}

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        # Download and unpack the shared archive once; every config reads from it.
        dl_dir = dl_manager.download_and_extract(self.config.data_url)
        if self.config.name == "source":
            return [
                datasets.SplitGenerator(
                    name="ent_ids",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "id_ent_1"),
                        "split": "ent_ids",
                    },
                ),
                datasets.SplitGenerator(
                    name="rel_triples_id",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "triples_1"),
                        "split": "rel_triples_id",
                    },
                ),
                datasets.SplitGenerator(
                    name="LaBSE_emb",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "raw_LaBSE_emb_1.pkl"),
                        "split": "LaBSE_emb",
                    },
                ),
            ]
        elif self.config.name == "target":
            return [
                datasets.SplitGenerator(
                    name="ent_ids",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "id_ent_2"),
                        "split": "ent_ids",
                    },
                ),
                datasets.SplitGenerator(
                    name="rel_triples_id",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "triples_2"),
                        "split": "rel_triples_id",
                    },
                ),
                datasets.SplitGenerator(
                    name="LaBSE_emb",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "raw_LaBSE_emb_2.pkl"),
                        "split": "LaBSE_emb",
                    },
                ),
            ]
        elif self.config.name == "pairs":
            return [
                datasets.SplitGenerator(
                    name="ref",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "ref_ent_ids"),
                        "split": "ref",
                    },
                ),
                datasets.SplitGenerator(
                    name="valid",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "valid.ref"),
                        "split": "valid",
                    },
                ),
            ]

    def _generate_examples(self, data_file, split):
        if split == "LaBSE_emb":
            # Pickled dict mapping entity id -> pre-computed LaBSE embedding.
            with open(data_file, "rb") as f:
                des = pickle.load(f)
            for i, (ent_id, ori_emb) in enumerate(des.items()):
                yield i, {
                    "column1": str(ent_id),
                    # The declared schema is string-valued, so serialize the
                    # embedding vector as a JSON string.
                    "column2": json.dumps([float(x) for x in ori_emb]),
                }
        else:
            # The remaining files are tab-separated text.
            with open(data_file, "r", encoding="utf-8") as f:
                for i, line in enumerate(f):
                    row = line.rstrip("\n").split("\t")
                    if self.config.name in ["source", "target"]:
                        if split == "ent_ids":
                            yield i, {
                                "column1": row[0],
                                "column2": row[1],
                            }
                        elif split == "rel_triples_id":
                            yield i, {
                                "column1": row[0],
                                "column2": row[1],
                                "column3": row[2],
                            }
                    elif self.config.name == "pairs":
                        # Ids are declared as int32 in _info, so cast explicitly.
                        yield i, {
                            "left_id": int(row[0]),
                            "right_id": int(row[1]),
                        }
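
For reference, a minimal usage sketch for the loader above. It assumes the script is hosted as matchbench/selfkg-dwy100k-dbpyg on the Hub (recent versions of datasets may also require trust_remote_code=True for script-based datasets); the config and split names come straight from the builder.

from datasets import load_dataset

# Config names: "source", "target", "pairs".
# Splits: "ent_ids", "rel_triples_id", "LaBSE_emb" (source/target); "ref", "valid" (pairs).
source = load_dataset("matchbench/selfkg-dwy100k-dbpyg", "source")
pairs = load_dataset("matchbench/selfkg-dwy100k-dbpyg", "pairs")

print(source["ent_ids"][0])  # {'column1': <id>, 'column2': <entity uri>, 'column3': None}
print(pairs["ref"][0])       # {'left_id': <int>, 'right_id': <int>}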