helper scripts
- .flake8 +25 -0
- .gitignore +56 -0
- prepare_for_release.py +137 -0
- requirements.txt +2 -0
.flake8
ADDED
@@ -0,0 +1,25 @@
+[flake8]
+max-line-length = 115
+
+ignore =
+    # these rules don't play well with black
+    # whitespace before :
+    E203
+    # line break before binary operator
+    W503
+    # line too long, who cares?
+    E501
+
+exclude =
+    .venv
+    .git
+    __pycache__
+    docs/build
+    dist
+    .mypy_cache
+    pretrain_data
+
+per-file-ignores =
+    # __init__.py files are allowed to have unused imports and lines-too-long
+    */__init__.py:F401,F403
+    */**/**/__init__.py:F401,E501
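
For context on the two black-related ignores above: black's own output can violate E203 and W503, so both are disabled. A minimal illustration (not from this repo) of code as black would format it:

ham = list(range(10))
lower, upper, offset = 2, 8, 1

# black puts spaces around ":" in complex slices, which flake8 flags as E203
sliced = ham[lower + offset : upper + offset]

# black breaks lines *before* binary operators, which flake8 flags as W503
ok = (
    len(sliced) > 0
    and sliced[0] == ham[lower + offset]
)
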
.gitignore
ADDED
@@ -0,0 +1,56 @@
+# build artifacts
+
+.eggs/
+.mypy_cache
+*.egg-info/
+build/
+dist/
+pip-wheel-metadata/
+
+
+# dev tools
+
+.envrc
+.python-version
+.idea
+.venv/
+.vscode/
+/*.iml
+
+
+# jupyter notebooks
+
+.ipynb_checkpoints
+
+
+# miscellaneous
+
+.cache/
+doc/_build/
+*.swp
+.DS_Store
+
+
+# python
+
+*.pyc
+*.pyo
+__pycache__
+
+
+# testing and continuous integration
+
+.coverage
+.pytest_cache/
+.benchmarks
+
+# documentation build artifacts
+
+docs/build
+site/
+
+# runs
+/runs/
+/wandb/
+/scratch/
+core
prepare_for_release.py
ADDED
@@ -0,0 +1,137 @@
+"""
+Prepares PES2O for release on the HuggingFace hub.
+
+Author: Luca Soldaini (@soldni)
+"""
+
+import argparse
+import json
+from contextlib import ExitStack
+from functools import partial
+from multiprocessing import Manager, Pool, cpu_count, set_start_method
+from queue import Queue
+from threading import Thread
+from time import sleep
+from typing import Optional, Tuple
+
+from smashed.utils import (
+    MultiPath,
+    compress_stream,
+    decompress_stream,
+    open_file_for_write,
+    recursively_list_files,
+    stream_file_for_read,
+)
+from tqdm import tqdm
+
+
+def process_single(
+    io_paths: Tuple[MultiPath, MultiPath],
+    version: str,
+    pbar_queue: Optional[Queue] = None,
+):
+    src, dst = io_paths
+    docs_cnt = 0
+
+    with ExitStack() as stack:
+        in_file = stack.enter_context(stream_file_for_read(src, "rb"))
+        in_stream = stack.enter_context(decompress_stream(in_file, "rt"))
+        out_file = stack.enter_context(open_file_for_write(dst, "wb"))
+        out_stream = stack.enter_context(compress_stream(out_file, "wt"))
+
+        for line in in_stream:
+            data = json.loads(line)
+            # drop internal metadata; infer source/split from the path layout
+            data.pop("metadata", None)
+            data["source"] = "s2ag" if "dataset=s2ag" in src.as_str else "s2orc"
+            data["split"] = "train" if "split=train" in src.as_str else "valid"
+            data["version"] = version
+
+            out_stream.write(json.dumps(data) + "\n")
+            docs_cnt += 1
+
+            # report progress in batches of 1,000 docs, then reset the counter
+            if pbar_queue is not None and docs_cnt % 1000 == 0:
+                pbar_queue.put((0, docs_cnt))
+                docs_cnt = 0
+
+    if pbar_queue is not None:
+        # signal one finished file plus any remaining docs
+        pbar_queue.put((1, docs_cnt))
+
+
+def threaded_progressbar(q: Queue, timeout: float, total_files: Optional[int] = None):
+    with ExitStack() as stack:
+        files_pbar = stack.enter_context(
+            tqdm(desc=" Files", unit="files", position=0, total=total_files)
+        )
+        docs_pbar = stack.enter_context(
+            tqdm(desc=" Docs", unit=" docs", position=1, unit_scale=True)
+        )
+
+        while True:
+            item = q.get()
+            if item is None:
+                break
+            else:
+                files, docs = item
+                files_pbar.update(files)
+                docs_pbar.update(docs)
+                sleep(timeout)
+
+
+def main():
+    ap = argparse.ArgumentParser()
+    ap.add_argument("src", type=str, help="Source path")
+    ap.add_argument("dst", type=str, help="Destination path")
+    ap.add_argument(
+        "--debug", default=False, help="Debug mode", action="store_true"
+    )
+    ap.add_argument(
+        "--parallel", type=int, default=cpu_count(), help="Number of parallel processes"
+    )
+    ap.add_argument(
+        "-v", "--version", type=str, required=True, help="Version of the dataset"
+    )
+    opts = ap.parse_args()
+
+    src = MultiPath.parse(opts.src)
+    dst = MultiPath.parse(opts.dst)
+
+    # mirror the source tree under dst: each destination is dst plus the
+    # source file's path relative to the src prefix
+    src_paths = [MultiPath.parse(p) for p in recursively_list_files(src)]
+    dst_paths = [
+        dst / (diff) if len(diff := (single_src - src)) > 0 else dst
+        for single_src in src_paths
+    ]
+
+    if opts.debug:
+        # single process, easier to step through
+        with tqdm(total=len(src_paths)) as pbar:
+            for single_src, single_dst in zip(src_paths, dst_paths):
+                process_single((single_src, single_dst), version=opts.version)
+                pbar.update(1)
+
+    else:
+        set_start_method("spawn")
+
+        with Pool(processes=opts.parallel) as pool:
+            pbar_queue: Queue = (manager := Manager()).Queue()
+            pbar_thread = Thread(
+                target=threaded_progressbar,
+                args=(pbar_queue, 0.1, len(src_paths)),
+                daemon=True,
+            )
+            pbar_thread.start()
+
+            for _ in pool.imap_unordered(
+                partial(process_single, pbar_queue=pbar_queue, version=opts.version),
+                tuple(zip(src_paths, dst_paths)),
+            ):
+                ...
+
+            pool.close()
+            pool.join()
+
+            pbar_queue.put(None)
+            pbar_thread.join()
+            manager.shutdown()
+
+
+if __name__ == "__main__":
+    main()
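
To make the per-record transform in process_single concrete, here is a minimal, self-contained sketch of what one JSONL record looks like before and after; the sample record and path are illustrative assumptions, not data from the release. A hypothetical invocation would be along the lines of "python prepare_for_release.py SRC DST -v v2".

import json

# hypothetical source path; the script infers source/split from path segments
src_path = "s3://example-bucket/pes2o/dataset=s2orc/split=train/part-0.jsonl.gz"

raw = {"id": "123", "text": "...", "metadata": {"sha": "abc"}}

record = dict(raw)
record.pop("metadata", None)  # internal metadata is dropped
record["source"] = "s2ag" if "dataset=s2ag" in src_path else "s2orc"
record["split"] = "train" if "split=train" in src_path else "valid"
record["version"] = "v2"  # value passed via -v/--version

print(json.dumps(record))
# {"id": "123", "text": "...", "source": "s2orc", "split": "train", "version": "v2"}
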
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+smashed[remote]
+tqdm