Scandi+English tokenizer on OSCAR

- .gitattributes +7 -0
- special_tokens_map.json +6 -0
- texts/all.txt +3 -0
- texts/da.opening.txt +3 -0
- texts/en.opening.txt +3 -0
- texts/nn.opening.txt +3 -0
- texts/nn.opening.wiki.txt +3 -0
- texts/no.opening.txt +3 -0
- texts/sv.opening.txt +3 -0
- tokenizer.json +0 -0
- tokenizer_config.json +51 -0
- train_tokenizer.py +168 -0
.gitattributes
CHANGED
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+texts/nn.opening.wiki.txt filter=lfs diff=lfs merge=lfs -text
+texts/no.opening.txt filter=lfs diff=lfs merge=lfs -text
+texts/sv.opening.txt filter=lfs diff=lfs merge=lfs -text
+texts/all.txt filter=lfs diff=lfs merge=lfs -text
+texts/da.opening.txt filter=lfs diff=lfs merge=lfs -text
+texts/en.opening.txt filter=lfs diff=lfs merge=lfs -text
+texts/nn.opening.txt filter=lfs diff=lfs merge=lfs -text
special_tokens_map.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "<pad>",
+  "unk_token": "<unk>"
+}
texts/all.txt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fef07b8bfdef0f9071b381deb4a0ac3627efa5e42f31f34d82f4e740d877a50c
+size 16552582106
texts/da.opening.txt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7525b7cf12415c0c2bbafd60f04a07b957d0b14dcad53a64c8fc59d1e25297e
+size 3258201809
texts/en.opening.txt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b64c3b85e582d4dbe3b16c900cb529ffce7118f56ce2b1620c1431921f0230d2
+size 6963630651
texts/nn.opening.txt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e94a2425df51215ceb5b4c64df20fe1e76c2aefa1a8fb093acb2ff6b16906ed
+size 87056553
texts/nn.opening.wiki.txt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00aa99b1c0aa097608c83a1d6244ada584e0cba27f9c4e08119fb2a40c142715
+size 113157261
texts/no.opening.txt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3edaf35fb8b2b66d5c54b345da22c9e2720709cd443ebca59cd62d97c75dfd8
+size 2399220841
texts/sv.opening.txt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8278001e6796c236c2301143bd1622e7346c70ebc4e82b3334698fb8e8398b01
+size 3731314991
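The corpus files above are stored as Git LFS pointers, so only each file's sha256 digest and byte size live in the repository. A minimal verification sketch in Python (assuming the real files have been fetched with `git lfs pull` into `texts/`; the helper name is ours):

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream in 1 MiB chunks so multi-GB corpora do not need to fit in memory.
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

path = Path("texts/da.opening.txt")
print(path.stat().st_size)  # pointer records: 3258201809
print(sha256_of(path))      # pointer records: f7525b7cf12415c0c2bbafd60f04a07b957d0b14dcad53a64c8fc59d1e25297e
```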
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.
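Since the tokenizer.json diff cannot be rendered here, a quick way to inspect the trained vocabulary is to load the file directly with the `tokenizers` library (a sketch; the sample sentence is ours):

```python
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")
print(tok.get_vocab_size())              # should match the --vocab_size used for training (32768 in the example run in train_tokenizer.py)
print(tok.encode("Hei, verden!").tokens) # eyeball the segmentation on a Norwegian sample
```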
tokenizer_config.json
ADDED
@@ -0,0 +1,51 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "bos_token_id": 1,
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "eos_token_id": 2,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "<pad>",
+  "pad_token_id": 3,
+  "padding_side": "right",
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "unk_token_id": 0,
+  "use_default_system_prompt": false
+}
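The config above pins the special tokens to fixed ids (`<unk>`=0, `<s>`=1, `</s>`=2, `<pad>`=3) and declares `LlamaTokenizer` as the tokenizer class. A short sanity-check sketch, assuming the tokenizer files sit in the current directory (replace "." with the Hub repo id once pushed):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")
print(tokenizer.unk_token, tokenizer.unk_token_id)  # <unk> 0
print(tokenizer.bos_token, tokenizer.bos_token_id)  # <s> 1
print(tokenizer.eos_token, tokenizer.eos_token_id)  # </s> 2
print(tokenizer.pad_token, tokenizer.pad_token_id)  # <pad> 3
```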
train_tokenizer.py
ADDED
@@ -0,0 +1,168 @@
+import argparse
+import json
+import os
+import tempfile
+from pathlib import Path
+from tqdm import tqdm
+
+from datasets import load_dataset
+from tokenizers import SentencePieceBPETokenizer
+from transformers import LlamaTokenizerFast, TrainingArguments, AutoTokenizer
+
+def main(args):
+
+    # Load the dataset from the huggingface Hub and prepare it for training
+    if args.dataset_name is not None:
+        if args.dataset_type:
+            if os.path.isfile(args.dataset_name):
+                data_files = [args.dataset_name]
+            else:
+                data_files = os.listdir(args.dataset_name)
+                data_files = [Path(args.dataset_name) / f for f in data_files]
+            print(f"Training on {len(data_files)} files")
+            dataset = load_dataset(args.dataset_type,
+                                   data_files=data_files,
+                                   split=args.dataset_split,
+                                   token=args.hub_token if args.hub_token else None
+                                   )
+        else:
+            dataset = load_dataset(args.dataset_name,
+                                   split=args.dataset_split,
+                                   streaming=True,
+                                   token=args.hub_token if args.hub_token else None
+                                   )
+        print(dataset)
+    else:
+        raise ValueError("No dataset name provided or dataset is already tokenized")
+
+    # Remove non text columns
+    dataset = dataset.remove_columns([col for col in dataset.column_names if col != "text"])
+
+    # Randomize docs
+    dataset = dataset.shuffle(seed=args.seed)
+
+    # Select `num_samples` from the dataset
+    if args.num_samples:
+        dataset = dataset.select(range(args.num_samples))
+
+    # Create a SentencePieceBPETokenizer
+    tokenizer = SentencePieceBPETokenizer()
+
+    # Train the SentencePieceBPETokenizer on the dataset
+    tokenizer.train_from_iterator(
+        iterator=dataset['text'],
+        vocab_size=args.vocab_size,
+        show_progress=True,
+        special_tokens=["<unk>", "<s>", "</s>", "<pad>"],
+    )
+
+    # Save the tokenizer
+    new_tokenizer_file = tempfile.NamedTemporaryFile(prefix='tokenizer_', suffix='.json').name
+    tokenizer.save(new_tokenizer_file, pretty=True)
+
+    # Load reference tokenizer
+    if args.reference_tokenizer is not None and args.hub_token is not None:
+        reference_tokenizer = AutoTokenizer.from_pretrained(args.reference_tokenizer, token=args.hub_token if args.hub_token else None)
+        reference_tokenizer_path = tempfile.TemporaryDirectory().name
+        reference_tokenizer.save_pretrained(reference_tokenizer_path)
+    else:
+        raise ValueError("No tokenizer name provided or no hub token provided. Try using `--reference_tokenizer 'mistralai/Mistral-7B-Instruct-v0.2'")
+
+    # Read and dump the json file for the new tokenizer and the reference tokenizer
+    with open(new_tokenizer_file) as f:
+        new_tokenizer_json = json.load(f)
+
+    with open(Path(reference_tokenizer_path) / "tokenizer.json") as f:
+        reference_tokenizer_json = json.load(f)
+
+    # Add the reference tokenizer's config to the new tokenizer's config
+    new_tokenizer_json["normalizer"] = reference_tokenizer_json["normalizer"]
+    new_tokenizer_json["pre_tokenizer"] = reference_tokenizer_json["pre_tokenizer"]
+    new_tokenizer_json["post_processor"] = reference_tokenizer_json["post_processor"]
+    new_tokenizer_json["decoder"] = reference_tokenizer_json["decoder"]
+    new_tokenizer_json["model"]['fuse_unk'] = reference_tokenizer_json["model"]['fuse_unk']
+    new_tokenizer_json["model"]['byte_fallback'] = reference_tokenizer_json["model"]['byte_fallback']
+
+    # Dump the new tokenizer's config
+    with open(new_tokenizer_file, "w") as f:
+        json.dump(new_tokenizer_json, f, indent=2, ensure_ascii=False)
+
+    # Load the new tokenizer as a LlamaTokenizerFast
+    new_llama_tokenizer = LlamaTokenizerFast(
+        tokenizer_file=new_tokenizer_file,
+        name_or_path=args.reference_tokenizer + "-tokenizer",
+        unk_token="<unk>",
+        unk_token_id=0,
+        bos_token="<s>",
+        bos_token_id=1,
+        eos_token="</s>",
+        eos_token_id=2,
+        pad_token="<pad>",
+        pad_token_id=3,
+        padding_side="right",
+    )
+
+    # Save the new tokenizer
+    new_llama_tokenizer.save_pretrained(args.output)
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Train a new Llama tokenizer")
+    parser.add_argument(
+        "--dataset_name",
+        type=str,
+        default=None,
+        help="The name of the dataset to be tokenized",
+    )
+    parser.add_argument(
+        "--dataset_type",
+        type=str,
+        default=None,
+        help="The type, 'text', 'json', or 'csv'. Leave blank for regular HF datasets",
+    )
+    parser.add_argument(
+        "--dataset_split",
+        type=str,
+        default=None,
+        help="The split of the dataset to be tokenized",
+    )
+    parser.add_argument(
+        "--hub_token",
+        type=str,
+        default=None,
+        help="The token to access the dataset on the hub",
+    )
+    parser.add_argument(
+        "--reference_tokenizer",
+        type=str,
+        default=None,
+        help="The name of the reference tokenizer to use",
+    )
+    parser.add_argument(
+        "--seed",
+        type=int,
+        default=123,
+        help="set random seed",
+    )
+    parser.add_argument(
+        "--num_samples",
+        type=int,
+        default=None,
+        help="Number of samples to use from the dataset",
+    )
+    parser.add_argument(
+        "--vocab_size",
+        type=int,
+        default=None,
+        help="Vocabulary size to use for the tokenizer",
+    )
+    parser.add_argument(
+        "--output",
+        type=str,
+        default="./",
+        help="Output path for the new tokenizer",
+    )
+    args = parser.parse_args()
+    main(args)
+
+# How to run:
+# python train_tokenizer.py --dataset_name texts/all.txt --dataset_type text --dataset_split train --reference_tokenizer mistralai/Mistral-7B-Instruct-v0.2 --vocab_size 32768 --hub_token True