"""
a quick script for getting wordcounts of all danish words in gigaword
"""
# import torch
# import torch.multiprocessing as mp
# mp.set_start_method('spawn', force=True)
# torch.set_num_threads(1)

import json
import os
from collections import defaultdict
from pathlib import Path

# from dacy.download import download_model, DEFAULT_CACHE_DIR
from typing import List, Optional, Tuple

import spacy

# model = "da_dacy_large_tft-0.0.0"
word_freq_path = "/data/DAGW/word_freqs"
dagw_sektioner = "/data/DAGW/dagw-master/sektioner"

# download_model(model, DEFAULT_CACHE_DIR)
# path = os.path.join(DEFAULT_CACHE_DIR, model)

# Load the large Danish spaCy pipeline; the parser and NER components are
# excluded since only tokenisation and POS tagging are needed here.
nlp = spacy.load("da_core_news_lg", exclude=["parser", "ner"])
# nlp.get_pipe("transformer").model.attrs["flush_cache_chance"] = 0.1


Path(word_freq_path).mkdir(parents=True, exist_ok=True)

# Collect the text files for each DAGW section, skipping LICENSE files and any
# .jsonl metadata files.
sections = os.listdir(dagw_sektioner)
filepaths = {}
for section in sections:
    subpath = os.path.join(dagw_sektioner, section)
    filepaths[section] = [
        os.path.join(subpath, fn)
        for fn in os.listdir(subpath)
        if fn != "LICENSE" and not fn.endswith(".jsonl")
    ]


def wordpiece_group_text(text, size=500):
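    """Yield `text` re-decoded in chunks of at most `size` wordpiece tokens,
    using a Danish ELECTRA tokenizer. Unused below; an alternative to the
    character-based `group_text`."""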
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(
        "Maltehb/-l-ctra-danish-electra-small-uncased", strip_accents=False
    )
    out = tokenizer.encode(text, add_special_tokens=False)

    prv = 0
    for i in range(size, len(out), size):
        yield tokenizer.decode(out[prv:i])
        prv = i
    if prv < len(out):
        yield tokenizer.decode(out[prv : len(out)])


def group_text(text, size=2400):
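    """Yield `text` in consecutive chunks of at most `size` characters."""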
    length = len(text)
    prv = 0
    for i in range(size, length, size):
        yield text[prv:i]
        prv = i
    if prv < length:
        yield text[prv:length]


def text_gen(filepaths):
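    """Yield character chunks from every file in `filepaths`, printing progress
    every 10,000 files."""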
    for i, file in enumerate(filepaths):
        if i % 10000 == 0:
            print("\t", i, "/", len(filepaths))
        with open(file, "r", encoding="utf-8") as f:
            text = f.read()
            for t in group_text(text):
                yield t


class WordCounter:
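    """Nested counter mapping token -> POS tag -> count."""
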
    def __init__(self, pairs: Optional[List[Tuple[str, str]]] = None):
        self.dict = defaultdict(lambda: defaultdict(int))
        if pairs is not None:
            self.add(pairs)

    def add(self, pairs: List[Tuple[str, str]]):
        for token, pos in pairs:
            self.dict[token][pos] += 1

    def __iadd__(self, other):
        """Merge another WordCounter's counts into this one in place."""
        for k_tok in other.dict:
            if k_tok in self.dict:
                for pos, count in other.dict[k_tok].items():
                    self.dict[k_tok][pos] += count
            else:
                self.dict[k_tok] = other.dict[k_tok]
        return self


# Process each section: stream chunked texts through the spaCy pipeline with
# multiple worker processes, accumulate (token, tag) counts, and dump them to
# JSON every 10,000 chunks to keep memory bounded.
for sec in filepaths:
    print("Starting Section:", sec)
    docs = nlp.pipe(texts=text_gen(filepaths[sec]), n_process=10, batch_size=8)

    n = 0
    word_counts = WordCounter()
    for i, doc in enumerate(docs, start=1):
        word_counts += WordCounter([(t.text, t.tag_) for t in doc])

        if i % 10000 == 0:
            with open(
                os.path.join(word_freq_path, f"wordfreq_{sec}_{n}.json"), "w"
            ) as f:
                json_str = json.dumps(word_counts.dict)
                f.write(json_str)
            word_counts = WordCounter()
            n += 1

    # Write out the remaining counts for this section.
    with open(os.path.join(word_freq_path, f"wordfreq_{sec}_{n}.json"), "w") as f:
        json_str = json.dumps(word_counts.dict)
        f.write(json_str)
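

# The function below is not part of the original pipeline; it is a minimal
# sketch of how the per-section JSON shards written above could be merged back
# into a single token -> POS tag -> count table.
def merge_word_freqs(freq_dir: str = word_freq_path) -> dict:
    """Merge every wordfreq_*.json shard in `freq_dir` into one nested dict."""
    merged = defaultdict(lambda: defaultdict(int))
    for fn in sorted(os.listdir(freq_dir)):
        if not (fn.startswith("wordfreq_") and fn.endswith(".json")):
            continue
        with open(os.path.join(freq_dir, fn), "r") as f:
            shard = json.load(f)
        for token, pos_counts in shard.items():
            for pos, count in pos_counts.items():
                merged[token][pos] += count
    return merged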