"""Build an entity vocabulary file and train/validation/test jsonl splits
from the NELL-One dataset.

- Wiki-One https://sites.cs.ucsb.edu/~xwhan/datasets/wiki.tar.gz
- NELL-One https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz

Download and unpack NELL-One first; the script expects the extracted NELL/
directory next to it:

    wget https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz
    tar -xzf nell.tar.gz
"""

import os
import json
from itertools import chain

data_dir = "NELL"
os.makedirs("data", exist_ok=True)

if not os.path.exists(data_dir):
    raise FileNotFoundError("Please download the dataset first\n"
                            "wget https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz\n"
                            "tar -xzf nell.tar.gz")


def read_file(_file):
    with open(f"{data_dir}/{_file}", 'r') as f_reader:
        tmp = json.load(f_reader)
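    # Each task file maps a relation name to a list of (head, relation, tail)
    # triples; flatten them into a single list of dicts.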
    flatten = list(chain(*[
        [{"relation": r, "head": h, "tail": t} for (h, r, t) in v]
        for v in tmp.values()
    ]))
    return flatten


def read_vocab(_file):
    """Return the sorted list of entity names from an entity-to-id mapping."""
    with open(f"{data_dir}/{_file}") as f_reader:
        ent2ids = json.load(f_reader)
    return sorted(ent2ids)


if __name__ == '__main__':
    vocab = read_vocab("ent2ids")
    with open("data/vocab.txt", 'w') as f:
        f.write("\n".join(vocab))
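
    # The source files name the dev split "dev_tasks.json"; export it as
    # "validation" so the output follows train/validation/test naming.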
    for src, split in zip(['dev_tasks.json', 'test_tasks.json', 'train_tasks.json'], ['validation', 'test', 'train']):
        d = read_file(src)
        with open(f"data/{split}.jsonl", "w") as f:
            f.write("\n".join([json.dumps(_d) for _d in d]))
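
# Quick sanity check of the converted output (hypothetical usage, not part of
# the conversion script itself):
#
#     with open("data/train.jsonl") as f:
#         triples = [json.loads(line) for line in f]
#     print(len(triples), triples[0])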