Zhangir Azerbayev committed
Commit 9bdf6a3
1 Parent(s): f293502

added some files

Files changed (5):
  1. fetch_arxiv.py +272 -0
  2. fetch_wiki.py +43 -29
  3. proof-pile.py +12 -1
  4. test.py +3 -2
  5. utils.py +46 -0
fetch_arxiv.py ADDED
@@ -0,0 +1,272 @@
+ import os
+ import sys
+ from pathlib import Path
+ import datetime
+
+ import tarfile
+ import xml.etree.ElementTree as ET
+ from tqdm import tqdm
+ import re
+ from itertools import chain, islice
+ import requests
+ import time
+
+ import shutil
+
+ import arxiv
+
+ import langdetect
+ from langdetect import detect
+
+ from utils import Loader as Loader
+ from utils import make_archive
+
+ def batch_loader(seq, size):
+     """
+     Iterator that takes in a list `seq` and returns
+     chunks of size `size`
+     """
+     return [seq[pos:pos + size] for pos in range(0, len(seq), size)]
+
+
+ def _delete_files_except_pattern(path, pattern, transform = lambda x: None, verbose=False):
+     """
+     Recursively delete files under `path` that do not match `pattern`; apply `transform` to files that do.
+     """
+     for f in os.listdir(path):
+         f_path = os.path.join(path, f)
+         if verbose:
+             print(f_path)
+         if os.path.isfile(f_path):
+             if not re.search(pattern, f):
+                 os.chmod(f_path, 0o755)
+                 os.remove(f_path)
+             else:
+                 transform(f_path)
+         elif os.path.isdir(f_path):
+             try:
+                 print(f_path)
+             except UnicodeEncodeError:
+                 new_path = f_path.encode("utf-8", 'replace').decode()
+                 os.system(f"mv \"{f_path}\" \"{new_path}\"")
+                 f_path = new_path
+
+             _delete_files_except_pattern(f_path, pattern, transform=transform, verbose=verbose)
+
+ def _download_with_progress_bar(url):
+     response = requests.get(url, stream=True)
+     total_size_in_bytes = int(response.headers.get("content-length", 0))
+     block_size = 1024 # 1 Kibibyte
+     progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
+     to_return = bytearray()
+     for data in response.iter_content(block_size):
+         progress_bar.update(len(data))
+         to_return += data
+     progress_bar.close()
+     if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
+         raise AssertionError("ERROR, something went wrong")
+
+     return to_return
+
+ def get_math_ids(resumption_token="init"):
+     with Loader(f"fetching metadata shard {resumption_token}..."):
+         if resumption_token=="init":
+             resp = requests.get("https://export.arxiv.org/oai2?verb=ListIdentifiers&set=math&metadataPrefix=oai_dc")
+         else:
+             time.sleep(5)
+             resp = requests.get(f"https://export.arxiv.org/oai2?verb=ListIdentifiers&resumptionToken={resumption_token}")
+
+     root = ET.fromstring(resp.content.decode("utf-8"))
+     articles = root[2]
+
+     math_ids = {}
+     for article in articles:
+         if article.tag == "{http://www.openarchives.org/OAI/2.0/}resumptionToken":
+             if article.text:
+                 return math_ids | get_math_ids(resumption_token=article.text)
+             else:
+                 return math_ids
+
+         db_id = article[0].text
+         eyed = db_id[db_id.rindex(":")+1:]
+         math_ids[eyed] = True
+
+ def clean_tex_file(path):
+     with open(path, encoding="utf-8") as f:
+         try:
+             src = f.read()
+         except (UnicodeDecodeError, UnicodeError):
+             print(f"Decoding error at {path} with utf-8. Trying latin-1")
+             try:
+                 with open(path, encoding="latin-1") as fle:
+                     src = fle.read()
+                     #print("latin-1 successful\n")
+             except (UnicodeDecodeError, UnicodeError):
+                 #print(f"Decoding error at {path} with latin-1. Trying utf-16")
+                 try:
+                     with open(path, encoding="utf-16") as fl:
+                         src = fl.read()
+                         #print("utf-16 successful\n")
+                 except (UnicodeDecodeError, UnicodeError):
+                     #print(f"Decoding error at {path} with utf-16. Trying utf-32")
+                     try:
+                         with open(path, encoding="utf-32") as f:
+                             src = f.read()
+                     except (UnicodeDecodeError, UnicodeError):
+                         print(f"Decoding error at {path} with all of utf-8, 16, 32 and latin-1. Deleting this file")
+                         print("This issue should only occur with a handful of quite old files. Continuing...\n")
+                         return
+
+     end = re.search(r"\\end\{document\}", src)
+     if end:
+         src = src[:end.span()[1]]
+
+     bib = re.search(r"\\Refs|\\begin\{thebibliography\}", src)
+     if bib:
+         src = src[:bib.span()[0]]
+
+     os.chmod(path, 0o755)
+     with open(path, "w", encoding="utf-8") as f:
+         f.write(src)
+
+ def clean_tex_file_some_more(path):
+     with open(path) as f:
+         text = f.read()
+
+     text = re.sub(r"(?<!\\)%.*", "", text)
+
+     match_obj = re.search(r"\\begin\{document\}", text)
+     if match_obj:
+         text = text[match_obj.span()[0]:]
+
+     match_obj = re.search(r"\\begin\{references\}", text)
+     if match_obj:
+         text = text[:match_obj.span()[0]]
+
+     text = text.strip()
+
+     os.remove(path)
+     if len(text)>280:
+         try:
+             print(path)
+         except UnicodeEncodeError:
+             path = path.encode('utf-8', 'replace').decode()
+
+         try:
+             lang = detect(text)
+         except langdetect.lang_detect_exception.LangDetectException:
+             # no linguistic features to analyze, delete
+             return
+
+         if lang=="en":
+             with open(path, "w") as f:
+                 f.write(text)
+         else:
+             print("HIT NONENGLISH ARTICLE")
+
+ def process_tarball_old_scheme(tarball_name, save_dir):
+     tarball_path = os.path.join(save_dir, tarball_name)
+     os.system("tar -xf " + tarball_path + " -C " + save_dir)
+
+     last_ = tarball_name.rfind("_")
+     second_last_ = tarball_name.rfind("_", 0, last_)
+     subdir = tarball_name[second_last_+1:last_]
+
+     subpath = os.path.join(save_dir, subdir)
+     zipped_names = os.listdir(subpath)
+
+     for zipped_name in zipped_names:
+         if zipped_name[-len(".gz"):]==".gz":
+             zipped_path = os.path.join(subpath, zipped_name)
+             if re.match(r"math", zipped_name):
+                 eyed = zipped_name[:-len(".gz")]
+                 if tarfile.is_tarfile(zipped_path):
+                     article_dir = os.path.join(subpath, eyed)
+                     Path(article_dir).mkdir()
+                     os.system("tar -xzf " + zipped_path + " -C " + article_dir)
+                     os.remove(zipped_path)
+                 else:
+                     os.system("gzip -d " + zipped_path)
+                     unzipped_path = os.path.join(subpath, eyed)
+                     os.rename(unzipped_path, unzipped_path + ".tex")
+             else:
+                 os.remove(zipped_path)
+
+     _delete_files_except_pattern(subpath, r".*\.tex", transform=clean_tex_file)
+     os.remove(tarball_path)
+
+ def process_tarball(tarball_name, save_dir, math_ids):
+     tarball_path = os.path.join(save_dir, tarball_name)
+     untar_cmd = "tar -xf " + tarball_path + " -C " + save_dir
+     os.system(untar_cmd)
+
+     last_ = tarball_name.rfind("_")
+     second_last_ = tarball_name.rfind("_", 0, last_)
+     subdir = tarball_name[second_last_+1:last_]
+
+     subpath = os.path.join(save_dir, subdir)
+     listdir = os.listdir(subpath)
+
+     ids = [x[:-3] for x in listdir if x[-3:]==".gz"]
+
+     for eyed in ids:
+         if eyed in math_ids:
+             zipped_path = os.path.join(subpath, eyed + ".gz")
+
+             if tarfile.is_tarfile(zipped_path):
+                 article_dir = os.path.join(subpath, eyed)
+                 Path(article_dir).mkdir()
+                 os.system("tar -xzf " + zipped_path + " -C " + article_dir)
+                 os.remove(zipped_path)
+             else:
+                 os.system("gzip -d " + zipped_path)
+                 unzipped_path = os.path.join(subpath, eyed)
+                 os.rename(unzipped_path, unzipped_path + ".tex")
+
+     _delete_files_except_pattern(subpath, r".*\.tex", transform=clean_tex_file)
+     os.remove(tarball_path)
+
+ def main():
+     """
+     Warning: this code is *extremely* brittle
+     """
+     math_ids = get_math_ids()
+
+     save_dir = "arxiv_1"
+     Path(save_dir).mkdir(exist_ok=True)
+     manifest_path = os.path.join(save_dir, "manifest.xml")
+
+     os.system(f"s3cmd get s3://arxiv/src/arXiv_src_manifest.xml --requester-pays {manifest_path}")
+
+     tree = ET.parse(manifest_path)
+     root = tree.getroot()
+
+     shards_and_dates = []
+     for child in root:
+         if child.tag == "file":
+             shard = child[1].text # the index of filename
+             yymm = child[9].text # the index of yymm
+             shards_and_dates.append((shard, yymm))
+
+     format_cutoff = datetime.datetime(2007, 3, 1) # arXiv switches from old to new format
+     for shard, yymm in tqdm(shards_and_dates):
+         print("SHARD: ", shard)
+         os.system(f"s3cmd get s3://arxiv/" + shard + \
+                 " --requester-pays " + save_dir)
+         tarball_name=shard[shard.rindex("/")+1:]
+
+         # nb this code will stop working in 2051 ;)
+         year = int("19" + yymm[:2]) if int(yymm[:2])>50 else int("20"+yymm[:2])
+         if datetime.datetime(year, int(yymm[2:]), 1)<=format_cutoff:
+             process_tarball_old_scheme(tarball_name, save_dir)
+         else:
+             process_tarball(tarball_name, save_dir, math_ids)
+
+     os.remove(manifest_path)
+
+ if __name__=="__main__":
+     #main()
+     #_delete_files_except_pattern("arxiv_1", r".*\.tex$", transform=clean_tex_file_some_more)
+     for f in tqdm(os.listdir("arxiv")):
+         f_path = os.path.join("arxiv", f)
+         make_archive(f_path)
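
For orientation only (not part of the commit): the shard-routing decision in main() reduces to parsing the manifest's yymm field and comparing it against the March 2007 cutoff. A minimal sketch, where _scheme_for is a hypothetical helper name introduced here for illustration:

import datetime

def _scheme_for(yymm: str) -> str:
    # mirrors main(): two-digit years above 50 are read as 19xx, otherwise as 20xx
    format_cutoff = datetime.datetime(2007, 3, 1)
    year = int("19" + yymm[:2]) if int(yymm[:2]) > 50 else int("20" + yymm[:2])
    return "old" if datetime.datetime(year, int(yymm[2:]), 1) <= format_cutoff else "new"

assert _scheme_for("9912") == "old"   # Dec 1999 -> process_tarball_old_scheme
assert _scheme_for("0703") == "old"   # Mar 2007 is still <= the cutoff
assert _scheme_for("2203") == "new"   # Mar 2022 -> process_tarball
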
fetch_wiki.py CHANGED
@@ -6,8 +6,14 @@ import re
  import pypandoc
  import json
  from pathlib import Path
+
  from fetch_books_and_formal import _download_with_progress_bar
- from fetch_mathoverflow import batch_loader
+ from utils import make_archive
+
+ import random
+
+ random.seed(20)
+
 
  def page_titles_of_category(cat_page):
      """
@@ -43,8 +49,11 @@ PROOFWIKI_URL = (
      "https://zenodo.org/record/4902289/files/naturalproofs_proofwiki.json?download=1"
  )
  def proofwiki(testing=False):
+     VAL_RATE = 0.025
      save_dir = "wiki/proofwiki"
+     val_dir = "wiki/proofwiki_val"
      Path(save_dir).mkdir(parents=True, exist_ok=True)
+     Path(val_dir).mkdir(parents=True, exist_ok=True)
 
      if testing:
          with open("naturalproofs/proofwiki.json") as f:
@@ -55,48 +64,53 @@ def proofwiki(testing=False):
          struct = json.loads(resp.decode("utf-8"))
          print("DONE DOWNLOADING PROOFWIKI")
 
-     batches = batch_loader(struct["dataset"]["theorems"], 500)
-     for i, batch in enumerate(batches):
-         thms_list = []
-         for thm in batch:
-             if thm["contents"]:
-                 thm_string = "\\section{" + thm["label"] + "}\n"
-                 thm_string += (
-                     "Tags: " + ", ".join(thm["categories"]).replace("/", ": ") + "\n\n"
-                 )
+     for i, thm in enumerate(struct["dataset"]["theorems"]):
+         if thm["contents"]:
+             thm_string = "\\section{" + thm["label"] + "}\n"
+             thm_string += (
+                 "Tags: " + ", ".join(thm["categories"]).replace("/", ": ") + "\n\n"
+             )
 
-                 thm_string += (
-                     "\\begin{theorem}\n"
-                     + "\n".join(thm["contents"])
-                     + "\n\\end{theorem}\n\n"
-                 )
+             thm_string += (
+                 "\\begin{theorem}\n"
+                 + "\n".join(thm["contents"])
+                 + "\n\\end{theorem}\n\n"
+             )
 
-                 for proof in thm["proofs"]:
-                     thm_string += (
-                         "\\begin{proof}\n"
-                         + "\n".join(proof["contents"])
-                         + "\n\\end{proof}\n\n"
-                     )
-
-                 thms_list.append(thm_string.strip())
-
-         with open(os.path.join(save_dir, f"shard_{i}.txt"), "w") as f:
-             f.write("<|endoftext|>\n".join(thms_list))
+             for proof in thm["proofs"]:
+                 thm_string += (
+                     "\\begin{proof}\n"
+                     + "\n".join(proof["contents"])
+                     + "\n\\end{proof}\n\n"
+                 )
+
+             if random.random()>VAL_RATE:
+                 with open(os.path.join(save_dir, f"""thm_{thm["id"]}.txt"""), "w") as f:
+                     f.write(thm_string)
+             else:
+                 with open(os.path.join(val_dir, f"""thm_{thm["id"]}.txt"""), "w") as f:
+                     f.write(thm_string)
 
      defn_strings = []
      for defn in struct["dataset"]["definitions"]:
          if defn["contents"]:
-             defn_strings.append((
+             defn_string = (
                  "\\begin{definition}["
                  + defn["label"]
                  + "]\n"
                  + "\n".join(defn["contents"])
                  + "\n\\end{definition}").strip()
-             )
-
-     with open(os.path.join(save_dir, "defs.txt"), "w") as f:
-         f.write("<|endoftext|>\n".join(defn_strings))
+
+             if random.random()>VAL_RATE:
+                 with open(os.path.join(save_dir, f"""def_{defn["id"]}.txt"""), "w") as f:
+                     f.write(defn_string)
+             else:
+                 with open(os.path.join(val_dir, f"""def_{defn["id"]}.txt"""), "w") as f:
+                     f.write(defn_string)
+
 
  if __name__=="__main__":
      #wikipedia()
      proofwiki()
+     make_archive("wiki/proofwiki")
+     make_archive("wiki/proofwiki_val")
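
As a sketch of what the new per-theorem writer emits (the record below is an invented placeholder; real records come from the naturalproofs_proofwiki.json dump), each file written to wiki/proofwiki, or to wiki/proofwiki_val roughly 2.5% of the time, holds one LaTeX-wrapped theorem:

thm = {
    "id": 123,
    "label": "Example Theorem",
    "categories": ["Set Theory/Unions"],
    "contents": ["Statement line 1", "Statement line 2"],
    "proofs": [{"contents": ["Proof line 1"]}],
}
thm_string = "\\section{" + thm["label"] + "}\n"
thm_string += "Tags: " + ", ".join(thm["categories"]).replace("/", ": ") + "\n\n"
thm_string += "\\begin{theorem}\n" + "\n".join(thm["contents"]) + "\n\\end{theorem}\n\n"
for proof in thm["proofs"]:
    thm_string += "\\begin{proof}\n" + "\n".join(proof["contents"]) + "\n\\end{proof}\n\n"
# would land in wiki/proofwiki/thm_123.txt (or the val dir when random.random() <= VAL_RATE)
print(thm_string)
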
proof-pile.py CHANGED
@@ -75,6 +75,7 @@ class ProofPile(datasets.GeneratorBasedBuilder):
      # data = datasets.load_dataset('my_dataset', 'first_domain')
      # data = datasets.load_dataset('my_dataset', 'second_domain')
      BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="arxiv", version=VERSION, description="All of English arxiv.math up to 03/22"),
          datasets.BuilderConfig(name="books", version=VERSION, description="Open source math textbooks"),
          datasets.BuilderConfig(name="formal", version=VERSION, description="Formal math libraries"),
          datasets.BuilderConfig(name="stack-exchange", version=VERSION, description="math overflow and math stack exchange"),
@@ -119,9 +120,19 @@ class ProofPile(datasets.GeneratorBasedBuilder):
          with open("splits.json") as f:
              splits = json.load(f)
 
-         self.archived_configs = ["stack-exchange", "math-dataset", "wiki"]
+         self.archived_configs = ["arxiv", "stack-exchange", "math-dataset", "wiki"]
 
          if self.config.name in self.archived_configs:
+             if self.config.name=="arxiv":
+                 train_paths = []
+                 val_paths = []
+                 for f in os.listdir("arxiv"):
+                     f_path = os.path.join("./arxiv", f)
+                     # validation set is june of years divisible by 4
+                     if int(f[1])%4==0 and int(f[3])==6:
+                         val_paths.append(f_path)
+                     else:
+                         train_paths.append(f_path)
              if self.config.name=="stack-exchange":
                  train_paths = [os.path.join("./stack-exchange", x) for x in ["math_overflow.tar.gz",
                      "math_stack_exchange.tar.gz"]]
test.py CHANGED
@@ -2,6 +2,9 @@ from datasets import load_dataset
  from itertools import islice
  import sys
 
+ dataset = load_dataset("./proof-pile.py", "arxiv")
+ print('ARXIV')
+ print(dataset)
  """
  dataset = load_dataset("./proof-pile.py", "books")
  print("BOOKS")
@@ -20,11 +23,9 @@ for x in dataset["train"]:
      print(x)
      break
  print(dataset)
- """
  dataset = load_dataset("./proof-pile.py", "wiki")
  print("WIKI")
  print(dataset)
- """
  dataset = load_dataset("./proof-pile.py", "math-dataset", download_mode='force_redownload')
  print("MATH DATASET")
  print(dataset)
utils.py CHANGED
@@ -1,7 +1,53 @@
  import os
  import tarfile
+ from itertools import cycle
+ from shutil import get_terminal_size
+ from threading import Thread
+ from time import sleep
 
  def make_archive(path):
      with tarfile.open(path + ".tar.gz", "w:gz") as tar:
          tar.add(path, arcname=os.path.sep)
      os.system(f"rm -r {path}")
+
+ class Loader:
+     def __init__(self, desc="Loading...", end="Done!", timeout=0.1):
+         """
+         A loader-like context manager
+
+         Args:
+             desc (str, optional): The loader's description. Defaults to "Loading...".
+             end (str, optional): Final print. Defaults to "Done!".
+             timeout (float, optional): Sleep time between prints. Defaults to 0.1.
+         """
+         self.desc = desc
+         self.end = end
+         self.timeout = timeout
+
+         self._thread = Thread(target=self._animate, daemon=True)
+         self.steps = ["⢿", "⣻", "⣽", "⣾", "⣷", "⣯", "⣟", "⡿"]
+         self.done = False
+
+     def start(self):
+         self._thread.start()
+         return self
+
+     def _animate(self):
+         for c in cycle(self.steps):
+             if self.done:
+                 break
+             print(f"\r{self.desc} {c}", flush=True, end="")
+             sleep(self.timeout)
+
+     def __enter__(self):
+         self.start()
+
+     def stop(self):
+         self.done = True
+         cols = get_terminal_size((80, 20)).columns
+         print("\r" + " " * cols, end="", flush=True)
+         print(f"\r{self.end}", flush=True)
+
+     def __exit__(self, exc_type, exc_value, tb):
+         # handle exceptions with those variables ^
+         self.stop()
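
A minimal usage sketch for the Loader context manager added above, mirroring how fetch_arxiv.py wraps its metadata requests (the sleep stands in for real work):

from time import sleep
from utils import Loader

# The spinner animates on a daemon thread while the block runs; stop() clears the line on exit.
with Loader("fetching metadata shard init...", end="metadata fetched"):
    sleep(1)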