Exr0n committed on
Commit 33a6373
1 Parent(s): 5020ef0

v2 filter out link texts that are dictionary words

.gitignore ADDED
@@ -0,0 +1 @@
+.venv/
2018thresh10dev.csv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:53c34495315acde41ca549eaa1ad02726e79c6f2aad46b77f84b11fb2459e666
-size 55066171
+oid sha256:6690985884c8b4a58d0377ef2152bb1fbb233355df68d662539da856e697a414
+size 51406267
2018thresh10test.csv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8089f88487de91ed778ecfed210f0f13488e3f670404c210ebd72ee3f1b268de
-size 36685709
+oid sha256:87ba47d64b3223a9ceda6c4c414312c4c8fa412c023d63ba8495909206f908fd
+size 34147126
2018thresh10train.csv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2618a03dd0278017b987759f723cc7525d19ddc3c79247a187bcf0a2601663ed
-size 274277317
+oid sha256:6a4c19706eb30b58641b06fa0e5efe53cf545f6bf49e3b5a30e62d96243517c8
+size 255547715
2018thresh20dev.csv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:06c9b05cc92746020c8379d55d8738ca6923129ca8a15766df1687625d73cb30
-size 39942761
+oid sha256:e80d37c92672e2b2041d47035c737db5c7dc0a9c5938ede28c46cdc25321be5a
+size 37218821
2018thresh20test.csv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:feea8c3d46deb539e8004bc5193a15509cb3b352d93431dbe5131a5e4356b9d9
-size 26669268
+oid sha256:7204244b6dc46350616835b741f925b98f50f6e9b4d5585d7815f226d5afde95
+size 24867451
2018thresh20train.csv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ebd8c98658d16de99412d9f52d3b98409c0bcefd36409378fe303d5dd61c9eba
-size 198305773
+oid sha256:9fcda4ee523a653d09f34c72e2134d56466d145c56a95596468ca7a1f4f9bf9c
+size 185091962
2018thresh5dev.csv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bf2e6b8bf0b091f3cbcc3cfbdd831989e35be1f8331de0a2f0d217e574c189c4
-size 71983269
+oid sha256:abf0f45844c3aa91747be22502d9d91570365aa0d085a1faaec17a04a84028c5
+size 67296490
2018thresh5test.csv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f654c09ae5f8ffbb80ddcb79c2be63e37b8ae184ef99c9555fb1c1625fedf1b7
-size 48195090
+oid sha256:7d9c39063394851308a52c71de4cae38386bfe7e16f73a355221dafe46a74b0f
+size 44759191
2018thresh5train.csv CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:866bad7a6fc1fcd477f87be9b856d289e47547878fcaeb000d1dba768a7b7468
-size 359260993
+oid sha256:e7b71f64c53d1aea33ec8bd6067b99c766625897904fd7cccdd82d60eac17b5c
+size 334315073
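
Each entry above is a Git LFS pointer file, so the commit only swaps the tracked object's sha256 oid and byte size; every new CSV is smaller than its predecessor, consistent with rows being filtered out in v2. As a minimal sketch (standard library only; the helper name is mine, and the example values come from the new 2018thresh10dev.csv pointer), a downloaded file can be checked against its pointer like this:

import hashlib
import os

def matches_pointer(path, oid, size):
    # compare a downloaded file against the sha256 oid and byte size
    # recorded in its Git LFS pointer
    if os.path.getsize(path) != size:
        return False
    h = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):  # stream 1 MiB at a time
            h.update(chunk)
    return h.hexdigest() == oid

# new pointer values for 2018thresh10dev.csv, from the diff above
print(matches_pointer('2018thresh10dev.csv',
                      '6690985884c8b4a58d0377ef2152bb1fbb233355df68d662539da856e697a414',
                      51406267))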
generate_wes_data.py CHANGED
@@ -1,14 +1,19 @@
 from datasets import load_dataset
 import pandas as pd
+from nltk.corpus import words
+from nltk import WordNetLemmatizer
 import numpy as np
 from tqdm import tqdm
 
 from collections import defaultdict
 from operator import itemgetter as ig
 from itertools import islice, chain, repeat
-from random import sample, choice, shuffle
+from random import seed, sample, choice, shuffle
 from gc import collect
 
+filter_dict = set(words.words())
+ltize = WordNetLemmatizer().lemmatize
+
 def generate_splits(subset, split=[0.75, 0.15, 0.1]):
     assert abs(sum(split) - 1.0) < 0.0001
     # get the data in dictionary form
@@ -16,6 +21,9 @@ def generate_splits(subset, split=[0.75, 0.15, 0.1]):
     ds = load_dataset('Exr0n/wiki-entity-similarity', subset, split='train')
     ds = list(tqdm(ds, total=len(ds)))
     for article, link in tqdm(map(ig('article', 'link_text'), ds), total=len(ds)):
+        if (ltize(article.lower()) not in filter_dict) and (ltize(link.lower()) in filter_dict):
+            # print(article, link, 'not quite right!')
+            continue # remove if link text is a dictionary word but article is not
         groups[article].append(link)
     del ds
 
@@ -38,10 +46,10 @@
     for i, keys in enumerate(splits):
         for key in keys:
             try:
-                got = sample(keys, len(groups[key])+1)
+                got = sample(keys, len(groups[key])+1) # sample n+1 keys
                 ret[i].append(
-                    [(key, choice(groups[k])) for k in got if k != key]
-                    [:len(groups[key])]
+                    [(key, choice(groups[k])) for k in got if k != key] # get a random link title from that key, if it's not the current key
+                    [:len(groups[key])] # ensure we don't have too many
                 )
             except ValueError:
                 raise ValueError("well frick one group is bigger than all the others combined. try sampling one at a time")
@@ -51,14 +59,16 @@
 
 
 if __name__ == '__main__':
+    seed(0x326ccc)
+    year = 2018
     for size in [5, 10, 20]:
-        x = generate_splits(subset='2018thresh' + str(size) + 'corpus')
+        x = generate_splits(subset=f'{year}thresh' + str(size) + 'corpus')
 
         for (data, labels), split in zip(x, ['train', 'dev', 'test']):
             articles, lts = list(zip(*data))
             df = pd.DataFrame({ 'article': articles, 'link_text': lts, 'is_same': list(labels) })
             df = df.sample(frac=1).reset_index(drop=True)
-            df.to_csv('2018thresh' + str(size) + split + '.csv', index=False)
+            df.to_csv(f'{year}thresh' + str(size) + split + '.csv', index=False)
             # print(df.head(30), df.tail(30))
 
 # tests
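
The heart of the v2 change is the new filter in the ingest loop: a pair is skipped when the link text lemmatizes to an English dictionary word while the article title does not, which weeds out generic anchors (a common noun pointing at a specific entity). A self-contained sketch of the same check, assuming the NLTK 'words' and 'wordnet' corpora have been downloaded; the example pairs are hypothetical, and the verdicts depend on what NLTK's word list actually contains:

import nltk
nltk.download('words')    # word list used as the dictionary filter
nltk.download('wordnet')  # data backing the lemmatizer

from nltk.corpus import words
from nltk import WordNetLemmatizer

filter_dict = set(words.words())
ltize = WordNetLemmatizer().lemmatize

def keep_pair(article, link_text):
    # mirror of the v2 condition: drop the pair when the link text is a
    # dictionary word but the article title is not
    drop = (ltize(article.lower()) not in filter_dict) \
           and (ltize(link_text.lower()) in filter_dict)
    return not drop

print(keep_pair('Alan Turing', 'Turing'))  # likely True: 'turing' is unlikely to be in the word list
print(keep_pair('Apple Inc.', 'apples'))   # likely False: 'apples' lemmatizes to 'apple'

The negative-sampling hunk is also easier to read outside the diff. Drawing len(groups[key])+1 candidate keys guarantees that at least n distinct other articles survive the k != key filter, so each article ends up with as many negative pairs as it has positive link texts, and the new seed(0x326ccc) pins the RNG so the published splits are reproducible. A toy sketch, assuming keys is a list of article titles and groups maps each title to its link texts:

from random import seed, sample, choice

def negatives_for(key, keys, groups):
    # draw n+1 candidates so at least n remain once the current article
    # excludes itself, take one random link text from each, trim to n
    n = len(groups[key])
    got = sample(keys, n + 1)  # without replacement; ValueError if n + 1 > len(keys)
    return [(key, choice(groups[k])) for k in got if k != key][:n]

seed(0x326ccc)  # same seed as the script, for reproducibility
groups = {'A': ['a1', 'a2'], 'B': ['b1'], 'C': ['c1'], 'D': ['d1']}  # toy data
print(negatives_for('A', list(groups), groups))  # two (A, <other link text>) pairs

That ValueError is exactly the branch the script re-raises with its "one group is bigger than all the others combined" message.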