LisaWang0306 committed on
Commit 5cd0931
Parent: 13e9758

Upload dbp15k-fr-en.py

Files changed (1)
  1. dbp15k-fr-en.py +170 -0
dbp15k-fr-en.py ADDED
import os

import datasets


class EntityAlignmentConfig(datasets.BuilderConfig):
    """BuilderConfig for the DBP15K (fr-en) entity alignment dataset."""

    def __init__(self, features, data_url, citation="", url="", label_classes=("False", "True"), **kwargs):
        """BuilderConfig for the DBP15K (fr-en) entity alignment dataset.

        Args:
          features: `list[string]`, list of the features that will appear in the
            feature dict. Should not include "label".
          data_url: `string`, url to download the zip file from.
          citation: `string`, citation for the data set.
          url: `string`, url for information about the data set.
          label_classes: `list[string]`, the list of classes for the label if the
            label is present as a string. Non-string labels will be cast to either
            'False' or 'True'.
          **kwargs: keyword arguments forwarded to super.
        """
        super(EntityAlignmentConfig, self).__init__(version=datasets.Version("1.0.3"), **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        # citation and url default to empty strings because the configs below
        # do not supply them.
        self.citation = citation
        self.url = url

class EntityAlignment(datasets.GeneratorBasedBuilder):
    """The DBP15K (fr-en) entity alignment dataset."""

    BUILDER_CONFIGS = [
        EntityAlignmentConfig(
            name="source",
            features=["column1", "column2", "column3"],
            data_url="https://www.dropbox.com/s/j55ly9i7w7t4tnn/dbp15k-fr-en-src.zip",
        ),
        EntityAlignmentConfig(
            name="target",
            features=["column1", "column2", "column3"],
            data_url="https://www.dropbox.com/s/eo2huntzhfti1p1/dbp15k-fr-en-tgt.zip",
        ),
        EntityAlignmentConfig(
            name="pairs",
            features=["left_id", "right_id"],
            data_url="https://www.dropbox.com/s/5lhkfka1imuum1o/dbp15k-fr-en-pairs.zip",
        ),
    ]

    def _info(self):
        # Every column is exposed as a plain string feature.
        features = {feature: datasets.Value("string") for feature in self.config.features}
        return datasets.DatasetInfo(features=datasets.Features(features))

    def _split_generators(self, dl_manager):
        # Download and unpack the zip archive for the selected config.
        dl_dir = dl_manager.download_and_extract(self.config.data_url)
        if self.config.name == "source":
            return [
                datasets.SplitGenerator(
                    name="ent_ids",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "ent_ids_1"),
                        "split": "ent_ids",
                    },
                ),
                datasets.SplitGenerator(
                    name="rel_ids",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "rel_ids_1"),
                        "split": "rel_ids",
                    },
                ),
                datasets.SplitGenerator(
                    name="attr_triples",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "attr_triples_1"),
                        "split": "attr_triples",
                    },
                ),
                datasets.SplitGenerator(
                    name="rel_triples",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "triples_1"),
                        "split": "rel_triples",
                    },
                ),
            ]
        elif self.config.name == "target":
            return [
                datasets.SplitGenerator(
                    name="ent_ids",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "ent_ids_2"),
                        "split": "ent_ids",
                    },
                ),
                datasets.SplitGenerator(
                    name="rel_ids",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "rel_ids_2"),
                        "split": "rel_ids",
                    },
                ),
                datasets.SplitGenerator(
                    name="attr_triples",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "attr_triples_2"),
                        "split": "attr_triples",
                    },
                ),
                datasets.SplitGenerator(
                    name="rel_triples",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "triples_2"),
                        "split": "rel_triples",
                    },
                ),
            ]
        elif self.config.name == "pairs":
            return [
                datasets.SplitGenerator(
                    name="train",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "sup_pairs"),
                        "split": "train",
                    },
                ),
                datasets.SplitGenerator(
                    name="test",
                    gen_kwargs={
                        "data_file": os.path.join(dl_dir, "ref_pairs"),
                        "split": "test",
                    },
                ),
            ]

    def _generate_examples(self, data_file, split):
        # Every data file is tab-separated, one record per line.
        with open(data_file, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                row = line.rstrip("\n").split("\t")
                if self.config.name in ["source", "target"]:
                    if split in ["ent_ids", "rel_ids"]:
                        # id files carry only two columns; the third feature is left null.
                        yield i, {
                            "column1": row[0],
                            "column2": row[1],
                            "column3": None,
                        }
                    else:
                        # triple files carry three columns (head, relation/attribute, tail).
                        yield i, {
                            "column1": row[0],
                            "column2": row[1],
                            "column3": row[2],
                        }
                elif self.config.name == "pairs":
                    yield i, {
                        "left_id": row[0],
                        "right_id": row[1],
                    }
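
A minimal usage sketch, assuming the script is loaded locally by file path and using the config and split names defined above; on recent versions of `datasets`, passing `trust_remote_code=True` may also be required for script-based datasets.

import datasets

# Source-KG entity id table: columns column1/column2 hold (id, URI), column3 is null.
source_ents = datasets.load_dataset("dbp15k-fr-en.py", "source", split="ent_ids")

# Alignment pairs expose "train" (sup_pairs) and "test" (ref_pairs) splits.
pairs = datasets.load_dataset("dbp15k-fr-en.py", "pairs")
print(pairs["train"][0])  # e.g. {"left_id": ..., "right_id": ...}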