Dataset: relbert/link_prediction_nell_one
Modalities: Text
Languages: English
Libraries: Datasets

Commit 14cb2fa (parent: 2c48303), committed by asahi417
.gitattributes CHANGED
@@ -52,3 +52,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+data/test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/validation.jsonl filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
+NELL
+nell.tar.gz
README.md ADDED
File without changes
data/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a94b344bf4b4b721f9ca1b96813b497c1f87eac795e393e321d370c5cb1dd1e
+size 275455
data/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59fcc5f746777fcda4058a0bf8cb6a55e03bde3b3ffdf6716a259a0fe2740374
+size 1071208
data/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dce6d2c53f2d4d9e7390033fcc787b5b405a06d13141e5affa5b3b43561657f5
+size 116970
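
The three JSONL splits above are stored with Git LFS, so the diff only shows pointer files (spec version, sha256 object id, and size in bytes) rather than the data itself. A small sketch for checking a downloaded file against such a pointer; the function name and paths are illustrative, not part of the repository:

    import hashlib

    def check_lfs_pointer(pointer_path, data_path):
        """Compare the oid/size recorded in a Git LFS pointer file with an actual file."""
        with open(pointer_path, encoding="utf-8") as f:
            fields = dict(line.strip().split(" ", 1) for line in f if line.strip())
        expected_oid = fields["oid"].split(":", 1)[1]  # value looks like "sha256:<hex digest>"
        with open(data_path, "rb") as f:
            blob = f.read()
        return hashlib.sha256(blob).hexdigest() == expected_oid and len(blob) == int(fields["size"])
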
data/vocab.txt ADDED
The diff for this file is too large to render.
 
link_prediction_nell_one.py ADDED
@@ -0,0 +1,85 @@
+import json
+import datasets
+
+
+logger = datasets.logging.get_logger(__name__)
+_DESCRIPTION = """NELL-One, a few-shot link prediction dataset."""
+_NAME = "link_prediction_nell_one"
+_VERSION = "0.0.0"
+_CITATION = """
+@inproceedings{xiong-etal-2018-one,
+    title = "One-Shot Relational Learning for Knowledge Graphs",
+    author = "Xiong, Wenhan and
+      Yu, Mo and
+      Chang, Shiyu and
+      Guo, Xiaoxiao and
+      Wang, William Yang",
+    booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
+    month = oct # "-" # nov,
+    year = "2018",
+    address = "Brussels, Belgium",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/D18-1223",
+    doi = "10.18653/v1/D18-1223",
+    pages = "1980--1990",
+    abstract = "Knowledge graphs (KG) are the key components of various natural language processing applications. To further expand KGs{'} coverage, previous studies on knowledge graph completion usually require a large number of positive examples for each relation. However, we observe long-tail relations are actually more common in KGs and those newly added relations often do not have many known triples for training. In this work, we aim at predicting new facts under a challenging setting where only one training instance is available. We propose a one-shot relational learning framework, which utilizes the knowledge distilled by embedding models and learns a matching metric by considering both the learned embeddings and one-hop graph structures. Empirically, our model yields considerable performance improvements over existing embedding models, and also eliminates the need of re-training the embedding models when dealing with newly added relations.",
+}
+"""
+
+_HOME_PAGE = "https://github.com/asahi417/relbert"
+_URL = f'https://huggingface.co/datasets/relbert/{_NAME}/resolve/main/data'
+_URLS = {
+    str(datasets.Split.TRAIN): [f'{_URL}/train.jsonl'],
+    str(datasets.Split.VALIDATION): [f'{_URL}/validation.jsonl'],
+    str(datasets.Split.TEST): [f'{_URL}/test.jsonl'],
+}
+
+
+class LinkPredictionNellOneConfig(datasets.BuilderConfig):
+    """BuilderConfig"""
+
+    def __init__(self, **kwargs):
+        """BuilderConfig.
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(LinkPredictionNellOneConfig, self).__init__(**kwargs)
+
+
+class LinkPredictionNellOne(datasets.GeneratorBasedBuilder):
+    """Dataset."""
+
+    BUILDER_CONFIGS = [
+        LinkPredictionNellOneConfig(name=_NAME, version=datasets.Version(_VERSION), description=_DESCRIPTION)
+    ]
+
+    def _split_generators(self, dl_manager):
+        downloaded_file = dl_manager.download_and_extract(_URLS)
+        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
+                for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]
+
+    def _generate_examples(self, filepaths):
+        _key = 0
+        for filepath in filepaths:
+            logger.info(f"generating examples from = {filepath}")
+            with open(filepath, encoding="utf-8") as f:
+                _list = [i for i in f.read().split('\n') if len(i) > 0]
+            for i in _list:
+                data = json.loads(i)
+                yield _key, data
+                _key += 1
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "relation": datasets.Value("string"),
+                    "head": datasets.Value("string"),
+                    "tail": datasets.Value("string"),
+                }
+            ),
+            supervised_keys=None,
+            homepage=_HOME_PAGE,
+            citation=_CITATION,
+        )
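
For reference, once this commit is on the Hub the splits can be loaded through the standard `datasets` API. A minimal sketch, assuming the repository id relbert/link_prediction_nell_one (the id implied by `_URL` in the script above):

    from datasets import load_dataset

    # Runs link_prediction_nell_one.py, which downloads the three JSONL splits.
    dataset = load_dataset("relbert/link_prediction_nell_one")
    print(dataset["train"][0])  # one triple: {"relation": ..., "head": ..., "tail": ...}
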
process.py ADDED
@@ -0,0 +1,44 @@
+"""
+- Wiki-One https://sites.cs.ucsb.edu/~xwhan/datasets/wiki.tar.gz
+- NELL-One https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz
+wget https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz
+tar -xzf nell.tar.gz
+"""
+import os
+import json
+from itertools import chain
+
+data_dir = "NELL"
+os.makedirs("data", exist_ok=True)
+
+if not os.path.exists(data_dir):
+    raise ValueError("Please download the dataset first\n"
+                     "wget https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz\n"
+                     "tar -xzf nell.tar.gz")
+
+
+def read_file(_file):
+    with open(f"{data_dir}/{_file}", 'r') as f_reader:
+        tmp = json.load(f_reader)
+    flatten = list(chain(*[[{"relation": r, "head": h, "tail": t} for (h, r, t) in v] for v in tmp.values()]))
+    # flatten = {}
+    # for k, v in tmp.items():
+    #     flatten[k] = [{"relation": r, "head": h, "tail": t} for (h, r, t) in v]
+    return flatten
+
+
+def read_vocab(_file):
+    with open(f"{data_dir}/{_file}") as f_reader:
+        ent2ids = json.load(f_reader)
+    return sorted(list(ent2ids.keys()))
+
+
+if __name__ == '__main__':
+    vocab = read_vocab("ent2ids")
+    with open("data/vocab.txt", 'w') as f:
+        f.write("\n".join(vocab))
+
+    for i, s in zip(['dev_tasks.json', 'test_tasks.json', 'train_tasks.json'], ['validation', 'test', 'train']):
+        d = read_file(i)
+        with open(f"data/{s}.jsonl", "w") as f:
+            f.write("\n".join([json.dumps(_d) for _d in d]))
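
process.py flattens the raw NELL-One task files into one JSON object per line, with the same relation / head / tail keys the loading script declares, and dumps the entity vocabulary to data/vocab.txt. A small sanity-check sketch for the generated files (paths assume process.py has already been run):

    import json

    # Every record in a generated split should carry exactly the three expected keys.
    with open("data/validation.jsonl", encoding="utf-8") as f:
        records = [json.loads(line) for line in f if line.strip()]
    assert all(set(r) == {"relation", "head", "tail"} for r in records)
    print(len(records), records[0])
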