Tasks: Text Generation
Modalities: Text
Sub-tasks: language-modeling
Languages: English
Size: 100K - 1M
import json
import os
import random
from pathlib import Path

import wikipediaapi

from fetch_books_and_formal import _download_with_progress_bar
from utils import make_archive

random.seed(20)
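# The fixed seed above makes the random train/validation split in
# proofwiki() reproducible across runs.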
def page_titles_of_category(cat_page):
    """
    Recursively collect the titles of all main-namespace pages in
    `cat_page`, descending into subcategories.
    """
    titles = []
    for member in cat_page.categorymembers.values():
        if member.ns == wikipediaapi.Namespace.MAIN:
            titles.append(member.title)
        elif member.ns == wikipediaapi.Namespace.CATEGORY:
            titles += page_titles_of_category(member)
    return titles
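# Hypothetical usage sketch: page_titles_of_category needs an initialized
# API client. The plain-language constructor is an assumption; newer
# wikipedia-api releases also expect a user_agent argument.
#   wiki = wikipediaapi.Wikipedia("en")
#   titles = page_titles_of_category(wiki.page("Category:Mathematical_proofs"))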
def wikipedia():
    """
    Collect Wikipedia page titles for mathematics categories.
    Original author's note: this doesn't work; don't run it. The `wiki`
    client was left undefined in the original and is patched in below.
    """
    # Assumption: a default English-language client; newer wikipedia-api
    # versions also require a user_agent argument.
    wiki = wikipediaapi.Wikipedia("en")
    init_categories = [
        # "Category:Mathematical_theorems",
        "Category:Mathematical_proofs",
        # "Category:Mathematical_examples",
        # "Category:Mathematical_problems",
        # "Category:Mathematical_terminology",
    ]
    title_set = set()
    for cat_name in init_categories:
        print(cat_name + "...")
        title_set = title_set.union(page_titles_of_category(wiki.page(cat_name)))
    return title_set
PROOFWIKI_URL = (
    "https://zenodo.org/record/4902289/files/naturalproofs_proofwiki.json?download=1"
)
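# The ProofWiki dump packaged with the NaturalProofs dataset
# (Welleck et al., 2021), hosted on Zenodo.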
def proofwiki(testing=False):
    """
    Download the NaturalProofs ProofWiki dump and write each theorem
    (with its proofs) and each definition to its own LaTeX-flavored .txt
    file, holding out roughly VAL_RATE of the files for validation.
    """
    VAL_RATE = 0.025
    save_dir = "wiki/proofwiki"
    val_dir = "wiki/proofwiki_val"
    Path(save_dir).mkdir(parents=True, exist_ok=True)
    Path(val_dir).mkdir(parents=True, exist_ok=True)
    if testing:
        # Use a local copy of the dump instead of downloading it.
        with open("naturalproofs/proofwiki.json") as f:
            struct = json.load(f)
    else:
        print("DOWNLOADING PROOFWIKI")
        resp = _download_with_progress_bar(PROOFWIKI_URL)
        struct = json.loads(resp.decode("utf-8"))
        print("DONE DOWNLOADING PROOFWIKI")
    for thm in struct["dataset"]["theorems"]:
        if thm["contents"]:
            thm_string = "\\section{" + thm["label"] + "}\n"
            thm_string += (
                "Tags: " + ", ".join(thm["categories"]).replace("/", ": ") + "\n\n"
            )
            thm_string += (
                "\\begin{theorem}\n"
                + "\n".join(thm["contents"])
                + "\n\\end{theorem}\n\n"
            )
            for proof in thm["proofs"]:
                thm_string += (
                    "\\begin{proof}\n"
                    + "\n".join(proof["contents"])
                    + "\n\\end{proof}\n\n"
                )
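            # Illustrative sketch of one generated theorem file (the label
            # and tag are made up):
            #   \section{Pythagorean Theorem}
            #   Tags: Euclidean Geometry
            #
            #   \begin{theorem}
            #   ...
            #   \end{theorem}
            #
            #   \begin{proof}
            #   ...
            #   \end{proof}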
            # Hold out a small fraction of theorems for validation.
            if random.random() > VAL_RATE:
                with open(os.path.join(save_dir, f"""thm_{thm["id"]}.txt"""), "w") as f:
                    f.write(thm_string)
            else:
                with open(os.path.join(val_dir, f"""thm_{thm["id"]}.txt"""), "w") as f:
                    f.write(thm_string)
    # Definitions are written the same way, as standalone definition blocks.
    for defn in struct["dataset"]["definitions"]:
        if defn["contents"]:
            defn_string = (
                "\\begin{definition}["
                + defn["label"]
                + "]\n"
                + "\n".join(defn["contents"])
                + "\n\\end{definition}"
            ).strip()
            if random.random() > VAL_RATE:
                with open(os.path.join(save_dir, f"""def_{defn["id"]}.txt"""), "w") as f:
                    f.write(defn_string)
            else:
                with open(os.path.join(val_dir, f"""def_{defn["id"]}.txt"""), "w") as f:
                    f.write(defn_string)
if __name__=="__main__":
#wikipedia()
proofwiki()
make_archive("wiki/proofwiki")
make_archive("wiki/proofwiki_val")