import json
import os
import tarfile
import zipfile
import gzip
from itertools import chain
from glob import glob

import requests
import gdown
from datasets import load_dataset

# hyperparameters for the contrastive pair construction
k = 10      # negatives start k ranks below the paired positive
m = 5       # anchors are restricted to the m + 1 top-rated pairs
top_n = 10  # number of top-rated positive pairs kept per relation
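
# This script builds RelBERT training data at three levels of granularity from
# the SemEval-2012 platinum ratings (see get_training_data below):
#   'parent'              - positives/negatives grouped by parent relation category
#   'child'               - positives/negatives grouped by child relation
#   'child_prototypical'  - anchored pair-vs-pair contrasts within a child relation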


def wget(url, cache_dir: str = './cache', gdrive_filename: str = None):
    """ Download a file over HTTP (or from Google Drive) and uncompress it. """
    os.makedirs(cache_dir, exist_ok=True)
    if url.startswith('https://drive.google.com'):
        assert gdrive_filename is not None, 'please provide a filename for the gdrive download'
        gdown.download(url, f'{cache_dir}/{gdrive_filename}', quiet=False)
        filename = gdrive_filename
    else:
        filename = os.path.basename(url)
        with open(f'{cache_dir}/{filename}', "wb") as f:
            r = requests.get(url)
            f.write(r.content)
    path = f'{cache_dir}/{filename}'

    # uncompress the downloaded archive, then delete it
    if path.endswith('.tar.gz') or path.endswith('.tgz') or path.endswith('.tar'):
        if path.endswith('.tar'):
            tar = tarfile.open(path)
        else:
            tar = tarfile.open(path, "r:gz")
        tar.extractall(cache_dir)
        tar.close()
        os.remove(path)
    elif path.endswith('.zip'):
        with zipfile.ZipFile(path, 'r') as zip_ref:
            zip_ref.extractall(cache_dir)
        os.remove(path)
    elif path.endswith('.gz'):
        with gzip.open(path, 'rb') as f:
            with open(path.replace('.gz', ''), 'wb') as f_write:
                f_write.write(f.read())
        os.remove(path)
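
# Usage sketch (the URL below is hypothetical, for illustration only):
#   wget('https://example.com/archive.tar.gz', cache_dir='./cache')
#   -> downloads to ./cache/archive.tar.gz, extracts into ./cache, removes the archive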


def get_training_data():
    """ Get RelBERT training data

    Returns
    -------
    train: list of dictionaries, each with keys 'positives' (list of word pairs),
        'negatives' (list of word pairs), 'relation_type' (e.g. '1b'), and
        'level' (one of 'parent', 'child', 'child_prototypical')
    valid: list of validation examples from relbert/conceptnet_high_confidence_v2
    """
    cache_dir = 'cache'
    os.makedirs(cache_dir, exist_ok=True)
    remove_relation = None
    path_answer = f'{cache_dir}/Phase2Answers'
    path_scale = f'{cache_dir}/Phase2AnswersScaled'
    url = 'https://drive.google.com/u/0/uc?id=0BzcZKTSeYL8VYWtHVmxUR3FyUmc&export=download'
    filename = 'SemEval-2012-Platinum-Ratings.tar.gz'
    if not (os.path.exists(path_scale) and os.path.exists(path_answer)):
        wget(url, gdrive_filename=filename, cache_dir=cache_dir)
    # sort both listings: glob does not guarantee a stable order across platforms
    files_answer = sorted(os.path.basename(i) for i in glob(f'{path_answer}/*.txt'))
    files_scale = sorted(os.path.basename(i) for i in glob(f'{path_scale}/*.txt'))
    assert files_answer == files_scale, f'files do not match: {files_scale} vs {files_answer}'
    positives = {}
    negatives = {}
    positives_limit = {}
    all_relation_type = {}
    for i in files_scale:
        # file names end with the relation id, e.g. 'Phase2Answers-1b.txt' -> '1b'
        relation_id = i.split('-')[-1].replace('.txt', '')
        if remove_relation and int(relation_id[:-1]) in remove_relation:
            continue

        # the answer file carries exactly one relation type per file
        with open(f'{path_answer}/{i}', 'r') as f:
            lines_answer = [_l.replace('"', '').split('\t') for _l in f.read().split('\n')
                            if not _l.startswith('#') and len(_l)]
        relation_type = list(set(list(zip(*lines_answer))[-1]))
        assert len(relation_type) == 1, relation_type
        relation_type = relation_type[0]

        # the scaled file has a fixed-width rating followed by the word pair
        with open(f'{path_scale}/{i}', 'r') as f:
            scales = [[float(_l[:5]), _l[6:].replace('"', '')] for _l in f.read().split('\n')
                      if not _l.startswith('#') and len(_l)]
        scales = sorted(scales, key=lambda _x: _x[0])

        # positively rated pairs are positives (best first); negatively rated pairs are negatives
        positive_pairs = [[s, tuple(p.split(':'))] for s, p in filter(lambda _x: _x[0] > 0, scales)]
        positive_pairs = sorted(positive_pairs, key=lambda x: x[0], reverse=True)
        positives[relation_id] = list(list(zip(*positive_pairs))[1])
        positives_limit[relation_id] = list(list(zip(*positive_pairs[:min(top_n, len(positive_pairs))]))[1])
        negatives[relation_id] = [tuple(p.split(':')) for s, p in filter(lambda _x: _x[0] < 0, scales)]
        all_relation_type[relation_id] = relation_type

    # parent relation ids are the child ids without the trailing letter ('1b' -> '1');
    # sorted for a deterministic output order
    parent = sorted(set(i[:-1] for i in all_relation_type.keys()))
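
    # Shape of the parsed structures at this point (values illustrative only):
    #   positives['1b']       -> [('office', 'desk'), ...]  all positive pairs, best first
    #   positives_limit['1b'] -> the top_n of those
    #   negatives['1b']       -> [('aaa', 'bbb'), ...]      all negatively rated pairs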

    # level 1 ('parent'): positives come from the parent's own child relations,
    # negatives from the children of every other parent
    relation_pairs_1st = []
    for p in parent:
        # match on the exact parent id; a plain startswith would wrongly group
        # e.g. '10a' under parent '1'
        child_positive = [x for x in all_relation_type.keys() if x[:-1] == p]
        child_negative = [x for x in all_relation_type.keys() if x[:-1] != p]
        positive_pairs = []
        negative_pairs = []
        for c in child_positive:
            positive_pairs += positives_limit[c]
        for c in child_negative:
            negative_pairs += positives_limit[c]
        relation_pairs_1st += [{
            "positives": positive_pairs, "negatives": negative_pairs, "relation_type": p, "level": "parent"
        }]

    # level 2 ('child'): positives come from the relation itself,
    # negatives from every other child relation's positives
    relation_pairs_2nd = []
    for p in all_relation_type.keys():
        positive_pairs = positives_limit[p]
        negative_pairs = []
        for n in all_relation_type.keys():
            if p == n:
                continue
            negative_pairs += positives[n]
        relation_pairs_2nd += [{
            "positives": positive_pairs, "negatives": negative_pairs, "relation_type": p, "level": "child"
        }]

    # level 3 ('child_prototypical'): pair each of the top m + 1 positives (the anchor)
    # with a lower-rated positive; negatives pair the anchor with everything ranked at
    # least k below that positive (remaining positives plus the rated negatives)
    relation_pairs_3rd = []
    for p in all_relation_type.keys():
        positive_pairs = positives[p]
        negative_pairs = positive_pairs + negatives[p]
        for n, anchor in enumerate(positive_pairs):
            if n > m:
                continue
            for _n, posi in enumerate(positive_pairs):
                if n < _n and len(negative_pairs) > _n + k:
                    relation_pairs_3rd += [{
                        "positives": [(anchor, posi)],
                        "negatives": [(anchor, neg) for neg in negative_pairs[_n + k:]],
                        "relation_type": p,
                        "level": "child_prototypical"
                    }]

    train = relation_pairs_1st + relation_pairs_2nd + relation_pairs_3rd
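
    # Illustrative shape of a single training record:
    #   {"positives": [('office', 'desk'), ...], "negatives": [...],
    #    "relation_type": "1b", "level": "child"}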

    # validation data: high-confidence ConceptNet relation pairs
    cn = load_dataset('relbert/conceptnet_high_confidence_v2')
    valid = list(chain(*cn.values()))
    for i in valid:
        i['level'] = 'N/A'
    return train, valid


if __name__ == '__main__':
    data_train, data_validation = get_training_data()
    print(f"- training data  : {len(data_train)}")
    print(f"- validation data: {len(data_validation)}")
    os.makedirs('dataset', exist_ok=True)  # ensure the output directory exists
    with open('dataset/train.jsonl', 'w') as f_writer:
        f_writer.write('\n'.join([json.dumps(i) for i in data_train]))
    with open('dataset/valid.jsonl', 'w') as f_writer:
        f_writer.write('\n'.join([json.dumps(i) for i in data_validation]))
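
# To read the output back (sketch; tuples are serialized as JSON lists):
#   with open('dataset/train.jsonl') as f:
#       data = [json.loads(line) for line in f if line.strip()]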