# Download the commonsense tuple files (dev1/dev2/test/train) and write
# relation-grouped JSONL splits into dataset/{train,valid,test}.jsonl.
import json
import os
import gzip
import requests
import pandas as pd
# Source files: gzip-compressed TSV tuple files hosted at TTIC.
_BASE_URL = 'https://home.ttic.edu/~kgimpel/comsense_resources'
urls = {
    split: f'{_BASE_URL}/{fname}'
    for split, fname in [
        ('dev1', 'dev1.txt.gz'),
        ('dev2', 'dev2.txt.gz'),
        ('test', 'test.txt.gz'),
        ('train', 'train600k.txt.gz'),
    ]
}
# Output directory for the generated JSONL splits.
os.makedirs("dataset", exist_ok=True)
def wget(url, cache_dir: str = './cache'):
    """Download *url* into *cache_dir*, gunzip it, and return the path of
    the decompressed file.

    If the decompressed file already exists in the cache, the download is
    skipped and the cached path is returned immediately.
    """
    os.makedirs(cache_dir, exist_ok=True)
    filename = os.path.basename(url)
    # BUGFIX: the download path must be derived from the URL's basename;
    # previously a literal placeholder string was used, so every URL
    # collided on the same bogus cache entry and `filename` was unused.
    gz_path = os.path.join(cache_dir, filename)
    out_path = gz_path.replace('.gz', '')
    # BUGFIX: check the *decompressed* file for a cache hit -- the .gz is
    # deleted after extraction below, so testing the archive path (as the
    # old code did) could never hit the cache.
    if os.path.exists(out_path):
        return out_path
    r = requests.get(url)
    r.raise_for_status()  # fail loudly on HTTP errors instead of caching an error page
    with open(gz_path, 'wb') as f_:
        f_.write(r.content)
    with gzip.open(gz_path, 'rb') as f_in, open(out_path, 'wb') as f_out:
        f_out.write(f_in.read())
    os.remove(gz_path)  # keep only the decompressed copy
    return out_path
def read_file(file_name):
    """Parse a tab-separated tuple file into positive/negative DataFrames.

    Each non-empty line holds four tab-separated fields:
    relation, head, tail, flag. Rows whose relation starts with "Not"
    (inverse relations) are dropped. A flag of '0' marks a negative
    example; anything else is positive. Returns
    ``(df_positive, df_negative)`` with columns relation/head/tail
    (the flag column is removed from both).
    """
    with open(file_name) as f_reader:
        rows = [line.split('\t') for line in f_reader.read().split('\n') if line]
    df = pd.DataFrame(rows, columns=['relation', 'head', 'tail', 'flag'])
    # Vectorized filter for inverse relations such as "NotCapableOf".
    df = df[~df['relation'].str.startswith('Not')]
    # .copy() so popping 'flag' below mutates an independent frame rather
    # than a view of `df` (avoids pandas SettingWithCopyWarning and is
    # correct under copy-on-write semantics).
    df_positive = df[df['flag'] != '0'].copy()
    df_negative = df[df['flag'] == '0'].copy()
    df_positive.pop('flag')
    df_negative.pop('flag')
    return df_positive, df_negative
if __name__ == '__main__':
    def _dump_relations(handle, df_pos, df_neg):
        """Write one JSON line per relation type that has >= 2 positives.

        `df_neg` may be None (train split), in which case 'negatives' is
        an empty list. Negatives are matched to the relation by equality
        on the 'relation' column.
        """
        for relation, group in df_pos.groupby('relation'):
            if len(group) < 2:
                # a relation needs at least two positive pairs to be usable
                continue
            if df_neg is None:
                negatives = []
            else:
                matched = df_neg[df_neg['relation'] == relation]
                negatives = matched[['head', 'tail']].to_numpy().tolist()
            handle.write(json.dumps({
                'relation_type': relation,
                'positives': group[['head', 'tail']].to_numpy().tolist(),
                'negatives': negatives
            }) + '\n')

    # Fetch and parse every split (downloads are cached by wget()).
    test_p, test_n = read_file(wget(urls['test']))
    dev1_p, dev1_n = read_file(wget(urls['dev1']))
    dev2_p, dev2_n = read_file(wget(urls['dev2']))
    train_p, _ = read_file(wget(urls['train']))  # train negatives are unused

    with open('dataset/test.jsonl', 'w') as f:
        _dump_relations(f, test_p, test_n)
    with open('dataset/train.jsonl', 'w') as f:
        _dump_relations(f, train_p, None)
    # valid.jsonl concatenates both dev splits, as the original loops did.
    with open('dataset/valid.jsonl', 'w') as f:
        _dump_relations(f, dev1_p, dev1_n)
        _dump_relations(f, dev2_p, dev2_n)