dumitrescustefan committed
Commit dc45156
1 Parent(s): 43857a6
Files changed (3)
  1. .gitattributes +2 -0
  2. _split_train.py +3 -87
  3. diacritic.py +3 -61
.gitattributes CHANGED
@@ -49,3 +49,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ _split_train.py filter=lfs diff=lfs merge=lfs -text
+ diacritic.py filter=lfs diff=lfs merge=lfs -text
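
These two new rules route `_split_train.py` and `diacritic.py` through Git LFS, which is why both file diffs below replace the Python sources with three-line LFS pointer stubs. As a quick sanity check, a small hypothetical helper (not part of this repo) can tell a checked-out pointer stub apart from real file content:

```python
# Hypothetical helper: a Git LFS pointer file begins with the spec line
# "version https://git-lfs.github.com/spec/v1" instead of real content.
def is_lfs_pointer(path: str) -> bool:
    with open(path, "rb") as f:
        return f.readline().startswith(b"version https://git-lfs.github.com/spec/v1")

for name in ("_split_train.py", "diacritic.py"):
    print(name, "->", "LFS pointer" if is_lfs_pointer(name) else "real content")
```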
_split_train.py CHANGED
@@ -1,87 +1,3 @@
- import os, json, gzip
-
- def zip_file(lines, dest_file):
-     print(f"Zipping {dest_file} ... ")
-     with gzip.open(dest_file, 'wt') as w:
-         w.writelines(lines)
-     print(f" .... done.")
-
- def split_train(MB_PER_FILE = 100):
-     print("Splitting TRAIN into shards with {} MB per file:".format(MB_PER_FILE))
-
-     os.makedirs("data", exist_ok=True)
-
-     print("Working ...")
-     cnt = 0
-     lid = 0
-     shard = 0
-     cutoff = MB_PER_FILE * 1000 * 1000
-     accumulator = []
-     line_cnt = 0
-     with open("train.txt", "r", encoding="utf8") as f:
-         for line in f:
-             line_cnt += 1
-             if line_cnt % 1000000 == 0:
-                 print(f"\t{line_cnt/1000000:.2f}M lines, at shard {shard} ... ")
-             cnt += len(line)
-
-             data = {
-                 "id": lid,
-                 "text": line.strip()
-             }
-             accumulator.append(json.dumps(data, ensure_ascii=False)+"\n")
-             lid += 1
-
-             if cnt > cutoff:
-                 zip_file(accumulator, os.path.join("data", "train-"+str(shard).zfill(3) + ".json.gz"))
-                 cnt = 0
-                 accumulator = []
-                 shard += 1
-
-     # flush buffer
-     zip_file(accumulator, os.path.join("data", "train-"+str(shard).zfill(3) + ".json.gz"))
-     shard += 1
-
-     print("Done, wrote {} files.".format(shard))
-
- def write_valid():
-     # do valid
-     cnt = 0
-     lid = 0
-     accumulator = []
-     with open("valid.txt", "r", encoding="utf8") as f:
-         for line in f:
-             cnt += len(line)
-             data = {
-                 "id": lid,
-                 "text": line.strip()
-             }
-             accumulator.append(json.dumps(data, ensure_ascii=False)+"\n")
-             lid += 1
-
-     zip_file(accumulator, os.path.join("data", "valid-000.json.gz"))
-
- def split_into_train_and_valid(percent=0.01):
-     import random
-     import ftfy
-     import tqdm
-     train_file = open("train.txt", "w", encoding="utf8")
-     valid_file = open("valid.txt", "w", encoding="utf8")
-     with open("corpus-diac.txt", "r", encoding="utf8") as f:
-         cnt = 0
-         for line in tqdm.tqdm(f):
-             cnt += 1
-             if cnt < 2000000:
-                 continue
-             line = ftfy.fix_text(line.strip())+"\n"
-             if random.random() <= percent:
-                 valid_file.write(line)
-             else:
-                 train_file.write(line)
-     train_file.close()
-     valid_file.close()
-
- #split_into_train_and_valid()
- os.makedirs("data", exist_ok=True)
- write_valid() # do valid
- split_train() # do train

+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62516c206bf1f6239cd687f2c95169b1d4f264ab54dee0187f86677116afecd1
+ size 2556
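
The removed script wrote each shard as a gzipped JSON-lines file of roughly `MB_PER_FILE` megabytes, one `{"id", "text"}` object per line, under `data/train-000.json.gz`, `data/train-001.json.gz`, and so on. A minimal read-back sketch, assuming that layout:

```python
import gzip
import json

# Minimal sketch: stream records out of one shard written by split_train().
# Each line is a JSON object with "id" (int) and "text" (str) keys.
def read_shard(path):
    with gzip.open(path, "rt", encoding="utf8") as f:
        for line in f:
            yield json.loads(line)

for record in read_shard("data/train-000.json.gz"):
    print(record["id"], record["text"][:50])
    break  # peek at the first record only
```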
diacritic.py CHANGED
@@ -1,61 +1,3 @@
- import json
- import datasets
- import gzip
-
- logger = datasets.logging.get_logger(__name__)
- _DESCRIPTION = """\\nRomanian diacritic dataset"""
- _CITATION = """n/a"""
-
- _URL = "https://github.com/dumitrescustefan/diacritic"
- _DATA_URL = "https://huggingface.co/datasets/dumitrescustefan/diacritic/resolve/main/data/{split_suffix}-{index:03d}.json.gz"
- _N_SHARDS_PER_SPLIT = {
-     "train": 78, "validation": 1
- }
-
- class RLM(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="v1", version="1.0.0", description="v1.0 of romanian diacritic corpus"),
-     ]
-
-     DEFAULT_CONFIG_NAME = "v1"
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("int64"),
-                     "text": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=None,
-             homepage=_URL,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         data_urls = {}
-         for split in ["train", "validation"]:
-             data_urls[split] = [
-                 _DATA_URL.format(split_suffix=split, index=iindex) for iindex in range(_N_SHARDS_PER_SPLIT[split])
-             ]
-
-         train_downloaded_files = dl_manager.download(data_urls["train"])
-         validation_downloaded_files = dl_manager.download(data_urls["validation"])
-
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_downloaded_files}),
-             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": validation_downloaded_files}),
-         ]
-
-     def _generate_examples(self, filepaths):
-         """This function returns the examples in the raw (text) form by iterating on all the files."""
-         id_ = 0
-         for filepath in filepaths:
-             logger.info("generating examples from = %s", filepath)
-             with gzip.open(open(filepath, "rb"), "rt", encoding="utf-8") as f:
-                 for line in f:
-                     if line:
-                         example = json.loads(line)
-                         yield id_, example
-                         id_ += 1

+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1dd36e7deba840b87837b54d3d4096dce3875b307679aad76ecebfe8d53cdc0
+ size 2260
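
The removed `diacritic.py` was the Hub loading script for this dataset: it declared `id`/`text` features, downloaded the 78 train shards and 1 validation shard built from `_DATA_URL`, and streamed examples line by line out of the gzipped JSON files. A minimal usage sketch, assuming the script is functional and a `datasets` version that still executes Hub loading scripts:

```python
from datasets import load_dataset

# Minimal sketch, assuming diacritic.py is usable as a loading script.
# streaming=True avoids downloading all 78 train shards up front.
dataset = load_dataset("dumitrescustefan/diacritic", "v1", streaming=True)
for example in dataset["train"]:
    print(example["id"], example["text"][:80])
    break
```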