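"""Build train/dev/test splits for the Exr0n/wiki-entity-similarity dataset.

For each corpus subset, this script filters noisy article/link-text pairs,
greedily allocates article groups to splits according to a target ratio,
pairs every positive (article, link_text) example with a randomly sampled
negative, and writes the shuffled results to CSV.
"""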
from datasets import load_dataset
import pandas as pd
from nltk.corpus import words
from nltk import WordNetLemmatizer
import numpy as np
from tqdm import tqdm

from collections import defaultdict
from operator import itemgetter as ig
from itertools import chain, repeat
from random import seed, sample, choice
from gc import collect

filter_dict = set(words.words())     # English dictionary words, used to filter noisy link text
ltize = WordNetLemmatizer().lemmatize  # lemmatize before dictionary lookup

def generate_splits(subset, split=(0.75, 0.15, 0.1)):
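    """Split `subset` into train/dev/test example sets.

    Returns one (examples, labels) pair per split ratio: `examples` yields
    (article, link_text) tuples (all positives first, then an equal number of
    random negatives) and `labels` yields the matching 1/0 targets.
    """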
    assert abs(sum(split) - 1.0) < 0.0001
    # get the data in dictionary form
    groups = defaultdict(list)
    ds = load_dataset('Exr0n/wiki-entity-similarity', subset, split='train')
    ds = list(tqdm(ds, total=len(ds)))
    for article, link in tqdm(map(ig('article', 'link_text'), ds), total=len(ds)):
        if (ltize(article.lower()) not in filter_dict) and (ltize(link.lower()) in filter_dict):
            # print(article, link, 'not quite right!')
            continue  # remove if link text is a dictionary word but article is not
        groups[article].append(link)
    del ds

    # greedily allocate splits: process the largest groups first, assigning each
    # to the split currently furthest below its target fraction
    order = sorted(groups.keys(), reverse=True, key=lambda e: len(groups[e]))
    splits = [[] for _ in split]
    sizes = [0.001] * len(split)    # avoid div zero error
    for group in order:
        impoverished = np.argmax([ s - (x/sum(sizes)) for x, s in zip(sizes, split) ])
        splits[impoverished].append(group)
        sizes[impoverished] += len(groups[group])

    sizes = [ int(x) for x in sizes ]
    print('final sizes', sizes, [x/sum(sizes) for x in sizes])

    # generate positive examples
    ret = [ [[(k, t) for t in groups[k]] for k in keys] for keys in splits ]

    # generate negative examples randomly (TODO: a swapping scheme would probably be more elegant)
    for i, keys in enumerate(splits):
        for key in keys:
            try:
                got = sample(keys, len(groups[key])+1)  # sample n+1 candidate articles so the current one can be dropped
                ret[i].append(
                    [(key, choice(groups[k])) for k in got if k != key] # get a random link title from that key, if it's not the current key
                    [:len(groups[key])] # ensure we don't have too many
                )
            except ValueError:
                raise ValueError("one group has more links than there are articles in this split; sample negatives one at a time instead")

    collect()
    return [(chain(*s), chain(repeat(1, z), repeat(0, z))) for z, s in zip(sizes, ret)]


if __name__ == '__main__':
    seed(0x326ccc)
    year = 2018
    for size in [5, 10, 20]:
        x = generate_splits(subset=f'{year}thresh{size}corpus')

        for (data, labels), split in zip(x, ['train', 'dev', 'test']):
            articles, lts = list(zip(*data))
            df = pd.DataFrame({ 'article': articles, 'link_text': lts, 'is_same': list(labels) })
            df = df.sample(frac=1).reset_index(drop=True)
            df.to_csv(f'{year}thresh{size}{split}.csv', index=False)
            # print(df.head(30), df.tail(30))

    # tests
    # for data, labels in x[2:]:
    #     data = list(data)
    #     labels = list(labels)
    #
    #     assert sum(labels) * 2 == len(labels)
    #     num = sum(labels)
    #
    #     before = [ a for a, _ in data[:num] ]
    #     after  = [ a for a, _ in data[num:] ]
    #     assert before == after
    #
    #     print(data[num:])