init

- .gitattributes +8 -0
- .gitignore +3 -1
- data/{test.jsonl → nell.test.jsonl} +0 -0
- data/{train.jsonl → nell.train.jsonl} +0 -0
- data/{validation.jsonl → nell.validation.jsonl} +0 -0
- data/nell.vocab.txt +3 -0
- data/vocab.txt +0 -0
- data/wiki.test.jsonl +3 -0
- data/wiki.train.jsonl +3 -0
- data/wiki.validation.jsonl +3 -0
- data/wiki.vocab.txt +3 -0
- link_prediction_nell_one.py +5 -5
- process.py +27 -8
- wiki.tar.gz.1 +3 -0
.gitattributes
CHANGED
@@ -55,3 +55,11 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 data/test.jsonl filter=lfs diff=lfs merge=lfs -text
 data/train.jsonl filter=lfs diff=lfs merge=lfs -text
 data/validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/wiki.vocab.txt filter=lfs diff=lfs merge=lfs -text
+data/nell.test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/nell.train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/nell.validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/nell.vocab.txt filter=lfs diff=lfs merge=lfs -text
+data/wiki.test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/wiki.train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/wiki.validation.jsonl filter=lfs diff=lfs merge=lfs -text

.gitignore
CHANGED
@@ -1,2 +1,4 @@
 NELL
-nell.tar.gz
+nell.tar.gz
+Wiki
+wiki.tar.gz

data/{test.jsonl → nell.test.jsonl}
RENAMED
File without changes

data/{train.jsonl → nell.train.jsonl}
RENAMED
File without changes

data/{validation.jsonl → nell.validation.jsonl}
RENAMED
File without changes

data/nell.vocab.txt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29af9862df4777ce613d72e94c460ade35e60477704fa7a48274cc803ca7ea4f
+size 2114701

data/vocab.txt
DELETED
The diff for this file is too large to render.

data/wiki.test.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7791ec3b056017a9c6bf0ca70714c695459840bd8bb86362c0c81ea46b7ab46
+size 938201

data/wiki.train.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f50246ef47702b72d633e1807b79d4915e2e2054bdc6cb4c4c2aa9b9ab1b13f
+size 3726937

data/wiki.validation.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9097f4c1d08f56b050cddddbc15dba0c282412ec519bf994a768930db825316
+size 401621

data/wiki.vocab.txt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8491e7a8cd96a74c74667adfa3a8403d6040a1aaf265f8b8d2161fb2c684d119
+size 46168647

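The three-line additions above are Git LFS pointer stubs, not the data itself; the actual JSONL and vocab contents are fetched with `git lfs pull`. As an illustration only (the helper below is hypothetical and not part of this commit), such a pointer can be parsed as plain key-value text:

```python
# Illustration only: a hypothetical helper (not part of this commit) that reads
# a Git LFS pointer stub. Each stub holds three "key value" lines: version, oid, size.
def parse_lfs_pointer(path):
    with open(path) as f:
        return dict(line.strip().split(" ", 1) for line in f if line.strip())

# Before `git lfs pull`, data/wiki.test.jsonl contains only the pointer lines.
print(parse_lfs_pointer("data/wiki.test.jsonl"))
```
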
link_prediction_nell_one.py
CHANGED
@@ -4,7 +4,7 @@ import datasets
 
 logger = datasets.logging.get_logger(__name__)
 _DESCRIPTION = """NELL-One, a few shots link prediction dataset. """
-_NAME = "
+_NAME = "link_prediction"
 _VERSION = "0.0.0"
 _CITATION = """
 @inproceedings{xiong-etal-2018-one,
@@ -35,7 +35,7 @@ _URLS = {
 }
 
 
-class
+class LinkPredictionConfig(datasets.BuilderConfig):
     """BuilderConfig"""
 
     def __init__(self, **kwargs):
@@ -43,14 +43,14 @@ class LinkPredictionNellOneConfig(datasets.BuilderConfig):
         Args:
             **kwargs: keyword arguments forwarded to super.
         """
-        super(
+        super(LinkPredictionConfig, self).__init__(**kwargs)
 
 
-class
+class LinkPrediction(datasets.GeneratorBasedBuilder):
    """Dataset."""
 
    BUILDER_CONFIGS = [
-
+        LinkPredictionConfig(name=_NAME, version=datasets.Version(_VERSION), description=_DESCRIPTION)
    ]
 
    def _split_generators(self, dl_manager):

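After this change the script exposes a single builder config named `link_prediction` (from `_NAME`). A minimal usage sketch, assuming the script is loaded from a local path with the `datasets` library, that the splits follow the usual train/validation/test naming, and that examples carry the relation/head/tail fields written by process.py:

```python
# Minimal sketch (assumptions: local script path, train/validation/test split names,
# and relation/head/tail fields as produced by process.py).
from datasets import load_dataset

dataset = load_dataset("link_prediction_nell_one.py", name="link_prediction")
print(dataset["train"][0])  # e.g. {"relation": ..., "head": ..., "tail": ...}
```
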
process.py
CHANGED
@@ -1,24 +1,35 @@
 """
 - Wiki-One https://sites.cs.ucsb.edu/~xwhan/datasets/wiki.tar.gz
 - NELL-One https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz
+
 wget https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz
 tar -xzf nell.tar.gz
+
+wget https://sites.cs.ucsb.edu/~xwhan/datasets/wiki.tar.gz
+tar -xzf wiki.tar.gz
+
 """
 import os
 import json
 from itertools import chain
 
-
+data_dir_nell = "NELL"
+data_dir_wiki = "Wiki"
 os.makedirs("data", exist_ok=True)
 
-if not os.path.exists(
+if not os.path.exists(data_dir_nell):
    raise ValueError("Please download the dataset first\n"
                     "wget https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz\n"
                     "tar -xzf nell.tar.gz")
 
+if not os.path.exists(data_dir_wiki):
+    raise ValueError("Please download the dataset first\n"
+                     "wget https://sites.cs.ucsb.edu/~xwhan/datasets/wiki.tar.gz\n"
+                     "tar -xzf wiki.tar.gz")
+
 
 def read_file(_file):
-    with open(
+    with open(_file, 'r') as f_reader:
        tmp = json.load(f_reader)
        flatten = list(chain(*[[{"relation": r, "head": h, "tail": t} for (h, r, t) in v] for v in tmp.values()]))
        # flatten = {}
@@ -28,17 +39,25 @@ def read_file(_file):
 
 
 def read_vocab(_file):
-    with open(
+    with open(_file) as f_reader:
        ent2ids = json.load(f_reader)
    return sorted(list(ent2ids.keys()))
 
 
 if __name__ == '__main__':
-    vocab = read_vocab("ent2ids")
-    with open("data/vocab.txt", 'w') as f:
+    vocab = read_vocab(f"{data_dir_nell}/ent2ids")
+    with open("data/nell.vocab.txt", 'w') as f:
+        f.write("\n".join(vocab))
+
+    vocab = read_vocab(f"{data_dir_wiki}/ent2ids")
+    with open("data/wiki.vocab.txt", 'w') as f:
        f.write("\n".join(vocab))
 
    for i, s in zip(['dev_tasks.json', 'test_tasks.json', 'train_tasks.json'], ['validation', 'test', 'train']):
-        d = read_file(i)
-        with open(f"data/{s}.jsonl", "w") as f:
+        d = read_file(f"{data_dir_nell}/{i}")
+        with open(f"data/nell.{s}.jsonl", "w") as f:
+            f.write("\n".join([json.dumps(_d) for _d in d]))
+
+        d = read_file(f"{data_dir_wiki}/{i}")
+        with open(f"data/wiki.{s}.jsonl", "w") as f:
            f.write("\n".join([json.dumps(_d) for _d in d]))

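process.py now writes one JSON object per line, with "relation", "head" and "tail" keys, into the data/nell.*.jsonl and data/wiki.*.jsonl files added in this commit. A minimal sketch of reading one of the generated files back:

```python
import json

# Read the line-delimited JSON produced by process.py; each non-empty line is one
# triple with "relation", "head" and "tail" keys, as written by read_file above.
with open("data/nell.train.jsonl") as f:
    triples = [json.loads(line) for line in f if line.strip()]

print(len(triples), triples[0]["relation"])
```
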
wiki.tar.gz.1
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6eb426b1dd72890f6d0b1f55cfcf10da386272d6586f7753c7531cac1330dfeb
+size 5275648