"""Convert the Wiki-One and NELL-One datasets into JSONL splits and vocabulary files under ./data.

Download and extract both datasets first:

- Wiki-One https://sites.cs.ucsb.edu/~xwhan/datasets/wiki.tar.gz
- NELL-One https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz

    wget https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz
    tar -xzf nell.tar.gz

    wget https://sites.cs.ucsb.edu/~xwhan/datasets/wiki.tar.gz
    tar -xzf wiki.tar.gz
"""

import os
import json
import re
from itertools import chain

data_dir_nell = "NELL"
data_dir_wiki = "Wiki"
os.makedirs("data", exist_ok=True)
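
# Acronyms that are kept fully upper-cased (rather than capitalized) in cleaned surface forms.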
short = ["alcs", "uk", "us", "usa", "npr", "nbc", "bbc", "cnn", "abc", "cbs", "nfl", "mlb", "nba", "nhl", "pga",
         "ncaa", "wjhu", "pbs", "un"]
non_entity_types = [
    'academicfield', 'agent', 'agriculturalproduct', 'amphibian', 'animal', 'aquarium', 'arachnid', 'architect',
    'arthropod', 'bakedgood', 'bathroomitem', 'bedroomitem', 'beverage', 'bird', 'blog', 'bodypart', 'bone',
    'candy', 'cave', 'chemical', 'clothing', 'coffeedrink', 'condiment', 'crimeorcharge', 'crustacean', 'date',
    'dateliteral', 'economicsector', 'fish', 'food', 'fruit', 'fungus', 'furniture', 'grain', 'hallwayitem',
    'hobby', 'insect', 'invertebrate', 'jobposition', 'kitchenitem', 'landscapefeatures', 'legume', 'location',
    'mammal', 'meat', 'mlsoftware', 'mollusk', 'month', 'nut', 'officebuildingroom', 'physiologicalcondition',
    'plant', 'politicsissue', 'profession', 'professionalorganization', 'reptile', 'room', 'sport', 'tableitem',
    'tradeunion', 'vegetable', 'vehicle', 'vertebrate', 'weapon', 'wine'
]


def clean(token):
    """Turn a NELL id of the form 'concept:<type>:<name>' into a readable name and its type."""
    _, _type, token = token.split(":", 2)
    # Underscores separate words in the raw ids; empty fragments are skipped below.
    token = token.replace("_", " ")
    # Strip trailing disambiguation suffixes such as "001" and a leading "n" before digits
    # (e.g. "n2003" -> "2003").
    token = re.sub(r"00\d\Z", "", token)
    token = re.sub(r"\An(\d+)", r"\1", token)
    if _type in non_entity_types:
        # Generic concepts keep their lower-cased surface form.
        return token, _type
    new_token = []
    for _t in token.split(" "):
        if len(_t) == 0:
            continue
        # Upper-case known acronyms, capitalize everything else.
        _t = _t.upper() if _t in short else _t.capitalize()
        new_token.append(_t)
    return " ".join(new_token), _type


if not os.path.exists(data_dir_nell):
    raise FileNotFoundError("Please download the dataset first\n"
                            "wget https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz\n"
                            "tar -xzf nell.tar.gz")

if not os.path.exists(data_dir_wiki):
    raise FileNotFoundError("Please download the dataset first\n"
                            "wget https://sites.cs.ucsb.edu/~xwhan/datasets/wiki.tar.gz\n"
                            "tar -xzf wiki.tar.gz")
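

# Each *_tasks.json file maps a relation to its list of [head, relation, tail] triples;
# read_file flattens this into a list of {"relation": ..., "head": ..., "tail": ...} records.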
def read_file(_file):
    with open(_file, 'r') as f_reader:
        tmp = json.load(f_reader)
    flatten = list(chain(*[[{"relation": r, "head": h, "tail": t} for (h, r, t) in v] for v in tmp.values()]))
    return flatten
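

# ent2ids is a JSON object keyed by entity id (the values, presumably integer indices, are unused here);
# only the sorted keys are kept as the vocabulary.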
def read_vocab(_file):
    with open(_file) as f_reader:
        ent2ids = json.load(f_reader)
    return sorted(ent2ids.keys())


if __name__ == '__main__':
    vocab = read_vocab(f"{data_dir_nell}/ent2ids")
    with open("data/nell.vocab.txt", 'w') as f:
        f.write("\n".join(vocab))
    vocab_clean = [clean(i)[0] if len(i.split(":")) > 2 else i for i in vocab]
    with open("data/nell.vocab.clean.txt", 'w') as f:
        f.write("\n".join(vocab_clean))

    vocab = read_vocab(f"{data_dir_wiki}/ent2ids")
    with open("data/wiki.vocab.txt", 'w') as f:
        f.write("\n".join(vocab))
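
    # Convert each task file into a JSONL split; "dev" is exported as "validation".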
    for i, s in zip(['dev_tasks.json', 'test_tasks.json', 'train_tasks.json'], ['validation', 'test', 'train']):
        d = read_file(f"{data_dir_nell}/{i}")
        for _d in d:
            head_entity, head_type = clean(_d['head'])
            _d['head_entity'] = head_entity
            _d['head_type'] = head_type

            tail_entity, tail_type = clean(_d['tail'])
            _d['tail_entity'] = tail_entity
            _d['tail_type'] = tail_type

        with open(f"data/nell.{s}.jsonl", "w") as f:
            f.write("\n".join([json.dumps(_d) for _d in d]))

        d = read_file(f"{data_dir_wiki}/{i}")
        for _d in d:
            _d['head_entity'] = ''
            _d['head_type'] = ''
            _d['tail_entity'] = ''
            _d['tail_type'] = ''
        with open(f"data/wiki.{s}.jsonl", "w") as f:
            f.write("\n".join([json.dumps(_d) for _d in d]))