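"""Compute per-subset byte and token counts for the proof-pile dataset.

Tokenizes every document with the GPT-NeoX-20B tokenizer, groups documents
by the subset name recorded in each document's "meta" field, and writes the
aggregate statistics to stats.json.
"""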
import json

from datasets import load_dataset
from tqdm import tqdm
from transformers import AutoTokenizer

# Number of worker processes for tokenization with dataset.map.
NUM_PROC = 12

# The corpus to measure and the tokenizer used to count tokens.
dataset = load_dataset("hoskinson-center/proof-pile")
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20B")

def length(example):
    # Batched map function: tokenize each document in the batch and
    # return its token count under a new "length" column.
    return {"length": [len(x) for x in tokenizer(example["text"])["input_ids"]]}

# Attach a "length" (token count) column to every document.
dataset = dataset.map(length, batched=True, num_proc=NUM_PROC)

# Aggregate byte and token counts per subset, keyed by the subset name
# recovered from each document's "meta" field.
stats = dict()

for x in tqdm(dataset["train"]):
    meta = json.loads(x["meta"])

    # The subset name is stored under different keys depending on the source.
    if "config" in meta:
        config = meta["config"]
    elif "set_name" in meta:
        config = meta["set_name"]
    elif "subset_name" in meta:
        # Fall back to the top-level directory of the file path.
        path = meta["file"]
        config = path[:path.index("/")]
    else:
        print(x)
        raise KeyError(f"no subset key found in meta: {list(meta.keys())}")

    if config not in stats:
        stats[config] = {"bytes": 0, "tokens": 0}

    stats[config]["bytes"] += len(x["text"].encode("utf-8"))
    stats[config]["tokens"] += x["length"]

print(json.dumps(stats, indent=2))
print("saving stats...")

with open("stats.json", "w") as f:
    json.dump(stats, f, indent=2)
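
# Illustrative shape of the resulting stats.json (subset names and numbers
# below are hypothetical examples, not actual output):
# {
#   "arxiv": {"bytes": 123456789, "tokens": 45678901},
#   "stack-exchange": {"bytes": 2345678, "tokens": 567890}
# }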