parquet-converter committed on
Commit
4f084f1
1 Parent(s): 8d35ec4

Update parquet files

Browse files
dwy100k-d-w.py DELETED
@@ -1,176 +0,0 @@
1
- import datasets
2
- import os
3
- import pickle
4
- import json
5
-
6
class Dwy100kDWConfig(datasets.BuilderConfig):
    """BuilderConfig for the DWY100K DBpedia-Wikidata alignment dataset."""

    def __init__(self, features, data_url, citation, url, label_classes=("False", "True"), **kwargs):
        """Record configuration metadata for one view of the dataset.

        Args:
            features: column names exposed by this configuration.
            data_url: URL of the zip archive with the raw files.
            citation: citation string (placeholder in SOURCE).
            url: homepage URL (placeholder in SOURCE).
            label_classes: label names kept for API parity with sibling configs.
            **kwargs: forwarded to ``datasets.BuilderConfig``.
        """
        # Zero-argument super() is equivalent in Python 3 to the explicit form.
        super().__init__(version=datasets.Version("0.0.1"), **kwargs)
        self.features = features
        self.label_classes = label_classes
        self.data_url = data_url
        self.citation = citation
        self.url = url
15
-
16
-
17
class Dwy100kDW(datasets.GeneratorBasedBuilder):
    """DWY100K (DBpedia-Wikidata) entity-alignment benchmark loader.

    Configurations:
      * ``source`` / ``target``: entity ids and relation triples of each
        knowledge graph, exposed as generic string columns ``column1..3``.
      * ``pairs``: aligned entity-id pairs, split into
        train / valid / test / sup / ref.
    """

    # Single zip archive shared by every configuration (was duplicated
    # three times in the original config list).
    _DATA_URL = "https://huggingface.co/datasets/matchbench/dwy100k-d-w/resolve/main/dwy-dbp-wd-100k.zip"

    BUILDER_CONFIGS = [
        Dwy100kDWConfig(
            name="source",
            features=["column1", "column2", "column3"],
            citation="TODO",
            url="TODO",
            data_url=_DATA_URL,
        ),
        Dwy100kDWConfig(
            name="target",
            features=["column1", "column2", "column3"],
            citation="TODO",
            url="TODO",
            data_url=_DATA_URL,
        ),
        Dwy100kDWConfig(
            name="pairs",
            features=["left_id", "right_id"],
            citation="TODO",
            url="TODO",
            data_url=_DATA_URL,
        ),
    ]

    def _info(self):
        """Build the ``DatasetInfo``; every column is a plain string.

        The original had three identical if/elif branches (one per config
        name); all configurations use string-valued columns, so a single
        comprehension covers source, target and pairs alike.
        """
        features = {feature: datasets.Value("string") for feature in self.config.features}
        return datasets.DatasetInfo(
            features=datasets.Features(features)
        )

    def _split_generators(self, dl_manager):
        """Download/extract the shared archive and map raw files to splits.

        Args:
            dl_manager: the ``datasets`` download manager.

        Returns:
            A list of ``SplitGenerator`` whose gen_kwargs carry the raw file
            path and the logical split name consumed by ``_generate_examples``.
        """
        dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""

        def _split(split_name, file_name):
            # One raw file per split inside the extracted archive.
            return datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, file_name),
                    "split": split_name,
                },
            )

        if self.config.name == "source":
            return [
                _split("ent_ids", "ent_ids_1"),
                _split("rel_triples_id", "triples_1"),
            ]
        elif self.config.name == "target":
            return [
                _split("ent_ids", "ent_ids_2"),
                _split("rel_triples_id", "triples_2"),
            ]
        elif self.config.name == "pairs":
            # NOTE(review): "test" and "ref" both read ref_ent_ids — this
            # mirrors the original layout; presumably intentional, verify.
            return [
                _split("train", "train_ent_ids"),
                _split("valid", "valid_ent_ids"),
                _split("test", "ref_ent_ids"),
                _split("sup", "sup_ent_ids"),
                _split("ref", "ref_ent_ids"),
            ]

    def _generate_examples(self, data_file, split):
        """Yield ``(key, example)`` pairs for one split.

        Args:
            data_file: path to a raw file — JSON for the "translated_name"
                split, tab-separated text otherwise.
            split: logical split name; selects the parsing scheme.
        """
        if split in ["translated_name"]:
            # JSON list of (name, translation) pairs; column3 is unused.
            # `with` replaces the original unclosed open() (handle leak).
            with open(data_file, "r") as fh:
                trans = json.load(fh)
            for i, pair in enumerate(trans):
                yield i, {
                    "column1": str(pair[0]),
                    "column2": str(pair[1]),
                    "column3": None,
                }
        else:
            # All remaining splits are tab-separated text files.
            # `with` replaces the original unclosed open() (handle leak).
            with open(data_file, "r", encoding="utf-8") as fh:
                data = fh.readlines()
            for i, line in enumerate(data):
                if self.config.name in ["source", "target"]:
                    if split in ["ent_ids", "rel_ids"]:
                        # Two columns: id <TAB> label/uri; column3 padded.
                        row = line.strip('\n').split('\t')
                        yield i, {
                            "column1": row[0],
                            "column2": row[1],
                            "column3": None,
                        }
                    elif split in ["rel_triples_id", "rel_triples_whole", "rel_triples_name"]:
                        # Three columns: head <TAB> relation <TAB> tail.
                        row = line.strip('\n').split('\t')
                        yield i, {
                            "column1": row[0],
                            "column2": row[1],
                            "column3": row[2],
                        }
                    elif split in ["attr_triples"]:
                        # rstrip (not strip) preserved from the original.
                        row = line.rstrip('\n').split('\t')
                        yield i, {
                            "column1": row[0],
                            "column2": row[1],
                            "column3": row[2],
                        }

                if self.config.name == "pairs":
                    # Aligned entity-id pair: left <TAB> right.
                    row = line.strip('\n').split('\t')
                    yield i, {
                        "left_id": row[0],
                        "right_id": row[1],
                    }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dwy-dbp-wd-100k.zip → pairs/dwy100k-d-w-ref.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f2cf34e0a4c5edef7bf486bd9e69e59fcf9699bc680b1a848af43f6660268d4e
3
- size 9626022
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a006837a616d148fbb615bc1cc94c7c13cc5f4da1b5fe4a31cef301b6cbad6fd
3
+ size 771855
pairs/dwy100k-d-w-sup.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1845a1a24768362a048871ac3d4664ed06ac110d41186cf2df548f7d1dd6bd14
3
+ size 423728
pairs/dwy100k-d-w-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a006837a616d148fbb615bc1cc94c7c13cc5f4da1b5fe4a31cef301b6cbad6fd
3
+ size 771855
pairs/dwy100k-d-w-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b43cd9027018263737d09f5d7580835c4be47943f20df36815be6646b5ad75a
3
+ size 282963
pairs/dwy100k-d-w-valid.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cba1a6f8872e06abc503fa2678ca1b4c7b4a7a10dca61c9554c72a6a5898906f
3
+ size 141725
source/dwy100k-d-w-ent_ids.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55a5cb323f36684bea5e5a23ec49c180feba4444cd45e11d72cd19c34bb99281
3
+ size 2666245
source/dwy100k-d-w-rel_triples_id.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2074c7248ac518d41d96d154ef84edc4af2d9163981cce2d4958f30223a0afe
3
+ size 7362026
target/dwy100k-d-w-ent_ids.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:88c823db5e6b3746a95b7f15b0412545b8871678fc07d6bfc05711918dadc135
3
+ size 1583430
target/dwy100k-d-w-rel_triples_id.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52cec1b3f85deb4b9944ee5abc72ff56adc614f3c7258c453084a57f4f69f31f
3
+ size 7085670