"""
- Wiki-One https://sites.cs.ucsb.edu/~xwhan/datasets/wiki.tar.gz
- NELL-One https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz
wget https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz
tar -xzf nell.tar.gz
wget https://sites.cs.ucsb.edu/~xwhan/datasets/wiki.tar.gz
tar -xzf wiki.tar.gz
"""
import os
import json
import re
from itertools import chain
data_dir_nell = "NELL"
data_dir_wiki = "Wiki"
os.makedirs("data", exist_ok=True)
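# Acronyms that clean() should upper-case rather than title-case.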
short = ['alcs', "uk", "us", "usa", "npr", "nbc", "bbc", "cnn", "abc", "cbs", "nfl", "mlb", "nba", "nhl", "pga", "ncaa",
         "wjhu", "pbs", "un"]
non_entity_types = [
    'academicfield',
    'agent',
    'agriculturalproduct',
    'amphibian',
    'animal',
    'aquarium',
    'arachnid',
    'architect',
    'arthropod',
    'bakedgood',
    'bathroomitem',
    'bedroomitem',
    'beverage',
    'bird',
    'blog',
    'bodypart',
    'bone',
    'candy',
    'cave',
    'chemical',
    'clothing',
    'coffeedrink',
    'condiment',
    'crimeorcharge',
    'crustacean',
    'date',
    'dateliteral',
    'economicsector',
    'fish',
    'food',
    'fruit',
    'fungus',
    'furniture',
    'grain',
    'hallwayitem',
    'hobby',
    'insect',
    'invertebrate',
    'jobposition',
    'kitchenitem',
    'landscapefeatures',
    'legume',
    'location',
    'mammal',
    'meat',
    'mlsoftware',
    'mollusk',
    'month',
    'nut',
    'officebuildingroom',
    'physiologicalcondition',
    'plant',
    'politicsissue',
    'profession',
    'professionalorganization',
    'reptile',
    'room',
    'sport',
    'tableitem',
    'tradeunion',
    'vegetable',
    'vehicle',
    'vertebrate',
    'weapon',
    'wine'
]
def clean(token):
    """Split a NELL token of the form "concept:type:name" into a readable
    surface form and its type."""
    _, _type, token = token.split(":", 2)  # maxsplit=2 tolerates extra colons inside the name
    token = token.replace("__", "")  # drop double-underscore artifacts before single underscores become spaces
    token = token.replace("_", " ")
    token = re.sub(r"00\d\Z", "", token)  # strip trailing disambiguation suffixes such as "001"
    token = re.sub(r"\An(\d+)", r"\1", token)  # "n1990" -> "1990"
    if _type in non_entity_types:
        # Generic concepts are not proper nouns: keep them lower-case.
        return token, _type
    new_token = []
    for _t in token.split(" "):
        if len(_t) == 0:
            continue
        if _t in short:
            _t = _t.upper()  # known acronym
        else:
            _t = _t.capitalize()
        new_token.append(_t)
    return " ".join(new_token), _type
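# For example (illustrative tokens): clean("concept:politician:george_bush")
# yields ("George Bush", "politician"), while clean("concept:fruit:apple")
# yields ("apple", "fruit") because "fruit" is a non-entity type.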
if not os.path.exists(data_dir_nell):
    raise ValueError("Please download the dataset first\n"
                     "wget https://sites.cs.ucsb.edu/~xwhan/datasets/nell.tar.gz\n"
                     "tar -xzf nell.tar.gz")
if not os.path.exists(data_dir_wiki):
    raise ValueError("Please download the dataset first\n"
                     "wget https://sites.cs.ucsb.edu/~xwhan/datasets/wiki.tar.gz\n"
                     "tar -xzf wiki.tar.gz")
def read_file(_file):
    """Load a task file mapping relation -> [(head, relation, tail), ...] and
    flatten it into a single list of {"relation", "head", "tail"} records."""
    with open(_file, 'r') as f_reader:
        tmp = json.load(f_reader)
    flatten = list(chain(*[[{"relation": r, "head": h, "tail": t} for (h, r, t) in v] for v in tmp.values()]))
    return flatten
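# Illustrative task-file entry (relation and entity names are made up):
#   {"concept:athleteplayssport": [["concept:athlete:john_doe",
#                                   "concept:athleteplayssport",
#                                   "concept:sport:golf"], ...], ...}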
def read_vocab(_file):
    """Return the sorted entity vocabulary from an ent2ids mapping file."""
    with open(_file) as f_reader:
        ent2ids = json.load(f_reader)
    return sorted(ent2ids)
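# Each line of the emitted data/*.jsonl files is one JSON object; for NELL it
# looks roughly like this (values illustrative):
#   {"relation": "concept:athleteplayssport", "head": "concept:athlete:john_doe",
#    "tail": "concept:sport:golf", "head_entity": "John Doe", "head_type": "athlete",
#    "tail_entity": "golf", "tail_type": "sport"}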
if __name__ == '__main__':
    # Raw and cleaned NELL entity vocabularies.
    vocab = read_vocab(f"{data_dir_nell}/ent2ids")
    with open("data/nell.vocab.txt", 'w') as f:
        f.write("\n".join(vocab))
    # Only entries shaped like "concept:type:name" can be cleaned; anything
    # else is written through unchanged.
    vocab_clean = [clean(i)[0] if len(i.split(":")) > 2 else i for i in vocab]
    with open("data/nell.vocab.clean.txt", 'w') as f:
        f.write("\n".join(vocab_clean))
    # Raw Wiki entity vocabulary (no cleaning applies).
    vocab = read_vocab(f"{data_dir_wiki}/ent2ids")
    with open("data/wiki.vocab.txt", 'w') as f:
        f.write("\n".join(vocab))
    # Convert each split of both datasets to JSONL.
    for i, s in zip(['dev_tasks.json', 'test_tasks.json', 'train_tasks.json'], ['validation', 'test', 'train']):
        d = read_file(f"{data_dir_nell}/{i}")
        for _d in d:
            head_entity, head_type = clean(_d['head'])
            _d['head_entity'] = head_entity
            _d['head_type'] = head_type
            tail_entity, tail_type = clean(_d['tail'])
            _d['tail_entity'] = tail_entity
            _d['tail_type'] = tail_type
        with open(f"data/nell.{s}.jsonl", "w") as f:
            f.write("\n".join([json.dumps(_d) for _d in d]))
        # Wiki-One entities are not in NELL's "concept:type:name" format,
        # so the cleaned fields are left empty for this dataset.
        d = read_file(f"{data_dir_wiki}/{i}")
        for _d in d:
            _d['head_entity'] = ''
            _d['head_type'] = ''
            _d['tail_entity'] = ''
            _d['tail_type'] = ''
        with open(f"data/wiki.{s}.jsonl", "w") as f:
            f.write("\n".join([json.dumps(_d) for _d in d]))