Datasets:

Modalities:
Text
Languages:
English
Libraries:
Datasets
License:
asahi417 committed on
Commit
d452fea
1 Parent(s): cf83452

Update process.py

Browse files
Files changed (1) hide show
  1. process.py +13 -9
process.py CHANGED
@@ -8,9 +8,10 @@ import pandas as pd
8
  urls = {
9
  'dev1': 'https://home.ttic.edu/~kgimpel/comsense_resources/dev1.txt.gz',
10
  'dev2': 'https://home.ttic.edu/~kgimpel/comsense_resources/dev2.txt.gz',
11
- 'test': 'https://home.ttic.edu/~kgimpel/comsense_resources/test.txt.gz'
 
12
  }
13
- exclude = ['NotCapableOf', 'NotDesires']
14
 
15
 
16
  def wget(url, cache_dir: str = './cache'):
@@ -32,21 +33,24 @@ def wget(url, cache_dir: str = './cache'):
32
 
33
  def read_file(file_name):
34
  with open(file_name) as f_reader:
35
- df = pd.DataFrame([i.split('\t') for i in f_reader.read().split('\n') if len(i) > 0],
36
- columns=['relation', 'head', 'tail', 'flag'])
37
  df_positive = df[df['flag'] == '1']
38
  df_positive.pop('flag')
39
- df_positive = df_positive[[i not in exclude for i in df_positive.relation]]
 
40
  return df_positive
41
 
42
 
43
  if __name__ == '__main__':
44
- test = read_file(wget(urls['test']))
45
  with open(f'dataset/test.jsonl', 'w') as f:
 
46
  f.write("\n".join([json.dumps(i.to_dict()) for _, i in test.iterrows()]))
47
- dev1 = read_file(wget(urls['dev1']))
48
  with open(f'dataset/train.jsonl', 'w') as f:
 
49
  f.write("\n".join([json.dumps(i.to_dict()) for _, i in dev1.iterrows()]))
50
- dev2 = read_file(wget(urls['dev2']))
51
  with open(f'dataset/valid.jsonl', 'w') as f:
52
- f.write("\n".join([json.dumps(i.to_dict()) for _, i in dev2.iterrows()]))
 
 
 
8
# Source archives for the ConceptNet commonsense triple splits
# (hosted by Kevin Gimpel, TTIC).
urls = {
    'dev1': 'https://home.ttic.edu/~kgimpel/comsense_resources/dev1.txt.gz',
    'dev2': 'https://home.ttic.edu/~kgimpel/comsense_resources/dev2.txt.gz',
    'test': 'https://home.ttic.edu/~kgimpel/comsense_resources/test.txt.gz',
    'train': 'https://home.ttic.edu/~kgimpel/comsense_resources/train600k.txt.gz',
}
# exclude = ['NotCapableOf', 'NotDesires']
17
  def wget(url, cache_dir: str = './cache'):
 
33
 
34
def read_file(file_name):
    """Load a tab-separated triple file and keep positive, non-negated rows.

    Each non-empty line is expected to have four tab-separated fields:
    ``relation``, ``head``, ``tail``, ``flag``.

    Returns a pandas DataFrame with columns ``['relation', 'head', 'tail']``
    containing only rows whose flag is the string '1' and whose relation
    does not start with "Not" (e.g. NotCapableOf, NotDesires).
    """
    with open(file_name) as f_reader:
        rows = [line.split('\t') for line in f_reader.read().split('\n') if len(line) > 0]
    df = pd.DataFrame(rows, columns=['relation', 'head', 'tail', 'flag'])
    # .copy() so the slice is an independent frame; mutating a view of `df`
    # (the pop below) would otherwise trigger pandas' SettingWithCopyWarning.
    df_positive = df[df['flag'] == '1'].copy()
    df_positive.pop('flag')
    # Drop negated relations such as NotCapableOf / NotDesires.
    df_positive = df_positive[[not r.startswith("Not") for r in df_positive.relation]]
    return df_positive
42
 
43
 
44
if __name__ == '__main__':
    # Build the three JSONL splits: test, train, valid (dev1 + dev2 merged).
    # Each output line is one triple serialized as a JSON object.

    test = read_file(wget(urls['test']))
    with open('dataset/test.jsonl', 'w') as f:
        f.write("\n".join(json.dumps(row.to_dict()) for _, row in test.iterrows()))

    train = read_file(wget(urls['train']))
    with open('dataset/train.jsonl', 'w') as f:
        # BUG FIX: the original iterated `dev1` here, which is not defined at
        # this point (NameError) and would be the wrong split anyway.
        f.write("\n".join(json.dumps(row.to_dict()) for _, row in train.iterrows()))

    # Validation set is the concatenation of the two dev splits.
    dev1 = read_file(wget(urls['dev1']))
    dev2 = read_file(wget(urls['dev2']))
    with open('dataset/valid.jsonl', 'w') as f:
        f.write("\n".join(
            [json.dumps(row.to_dict()) for _, row in dev1.iterrows()]
            + [json.dumps(row.to_dict()) for _, row in dev2.iterrows()]
        ))