BlankCheng committed · Commit 0017b35 · 1 parent: ac7d8cf
Update the non-web part.

curated.py  CHANGED  (+84 -27)
@@ -33,7 +33,9 @@ curated_sources_intro = Div(
    P(
        "Curated sources comprise high-quality datasets that contain domain-specificity.",
        B(
-           " TxT360 was strongly influenced by The Pile",
+           " TxT360 was strongly influenced by The Pile",
+           D_cite(bibtex_key="thepile"),
+           " regarding both inclusion of the dataset and filtering techniques.",
        ),
        " These sources, such as Arxiv, Wikipedia, and Stack Exchange, provide valuable data that is excluded from the web dataset mentioned above. Analyzing and processing non-web data can yield insights and opportunities for various applications. Details about each of the sources are provided below. ",
    ),
@@ -138,7 +140,9 @@ wikipedia_filter = pd.DataFrame(
)

table_html_wikipedia = wikipedia_filter.to_html(index=False, border=0)
- table_div_wikipedia = Div(
+ table_div_wikipedia = Div(
+     NotStr(table_html_wikipedia), style="margin-left: auto; width: 80%; align: center;"
+ )

freelaw_filter = pd.DataFrame(
    {
@@ -167,7 +171,9 @@ freelaw_filter = pd.DataFrame(
)

table_html_freelaw = freelaw_filter.to_html(index=False, border=0)
- table_div_freelaw = Div(
+ table_div_freelaw = Div(
+     NotStr(table_html_freelaw), style="margin-left: auto; width: 80%; align: center;"
+ )

dmm_filter = pd.DataFrame(
    {
@@ -196,7 +202,9 @@ dmm_filter = pd.DataFrame(
)

table_html_dmm = dmm_filter.to_html(index=False, border=0)
- table_div_dmm = Div(
+ table_div_dmm = Div(
+     NotStr(table_html_dmm), style="margin-left: auto; width: 80%; align: center;"
+ )


uspto_filter = pd.DataFrame(
@@ -226,7 +234,9 @@ uspto_filter = pd.DataFrame(
)

table_html_uspto = uspto_filter.to_html(index=False, border=0)
- table_div_uspto = Div(
+ table_div_uspto = Div(
+     NotStr(table_html_uspto), style="margin-left: auto; width: 80%; align: center;"
+ )

pg19_filter = pd.DataFrame(
    {
@@ -255,7 +265,9 @@ pg19_filter = pd.DataFrame(
)

table_html_pg19 = pg19_filter.to_html(index=False, border=0)
- table_div_pg19 = Div(
+ table_div_pg19 = Div(
+     NotStr(table_html_pg19), style="margin-left: auto; width: 80%; align: center;"
+ )


hn_filter = pd.DataFrame(
@@ -285,7 +297,9 @@ hn_filter = pd.DataFrame(
)

table_html_hn = hn_filter.to_html(index=False, border=0)
- table_div_hn = Div(
+ table_div_hn = Div(
+     NotStr(table_html_hn), style="margin-left: auto; width: 80%; align: center;"
+ )


uirc_filter = pd.DataFrame(
@@ -315,7 +329,9 @@ uirc_filter = pd.DataFrame(
)

table_html_uirc = uirc_filter.to_html(index=False, border=0)
- table_div_uirc = Div(
+ table_div_uirc = Div(
+     NotStr(table_html_uirc), style="margin-left: auto; width: 80%; align: center;"
+ )

up_filter = pd.DataFrame(
    {
@@ -344,7 +360,9 @@ up_filter = pd.DataFrame(
)

table_html_up = up_filter.to_html(index=False, border=0)
- table_div_up = Div(
+ table_div_up = Div(
+     NotStr(table_html_up), style="margin-left: auto; width: 80%; align: center;"
+ )

se_filter = pd.DataFrame(
    {
@@ -373,7 +391,9 @@ se_filter = pd.DataFrame(
)

table_html_se = se_filter.to_html(index=False, border=0)
- table_div_se = Div(
+ table_div_se = Div(
+     NotStr(table_html_se), style="margin-left: auto; width: 80%; align: center;"
+ )

arx_filter = pd.DataFrame(
    {
@@ -402,7 +422,9 @@ arx_filter = pd.DataFrame(
)

table_html_arx = arx_filter.to_html(index=False, border=0)
- table_div_arx = Div(
+ table_div_arx = Div(
+     NotStr(table_html_arx), style="margin-left: auto; width: 80%; align: center;"
+ )

s2o_filter = pd.DataFrame(
    {
@@ -431,7 +453,9 @@ s2o_filter = pd.DataFrame(
)

table_html_s2o = s2o_filter.to_html(index=False, border=0)
- table_div_s2o = Div(
+ table_div_s2o = Div(
+     NotStr(table_html_s2o), style="margin-left: auto; width: 80%; align: center;"
+ )

med_filter = pd.DataFrame(
    {
@@ -460,7 +484,9 @@ med_filter = pd.DataFrame(
)

table_html_med = med_filter.to_html(index=False, border=0)
- table_div_med = Div(
+ table_div_med = Div(
+     NotStr(table_html_med), style="margin-left: auto; width: 80%; align: center;"
+ )

phil_filter = pd.DataFrame(
    {
@@ -489,7 +515,9 @@ phil_filter = pd.DataFrame(
)

table_html_phil = phil_filter.to_html(index=False, border=0)
- table_div_phil = Div(
+ table_div_phil = Div(
+     NotStr(table_html_phil), style="margin-left: auto; width: 80%; align: center;"
+ )
## end individual tables showing filterin

@@ -681,24 +709,51 @@ filtering_process = Div(
P(
    B("Download and Extraction: "),
    "All the data was downloaded in original latex format from ArXiv official S3 repo: ",
-   A("s3://
-   ". We
+   A("s3://arxiv/src", href="s3://arxiv/src"),
+   ". We aim to encode the downloaded data in UTF-8 format, and when necessary, utilize the chardet library to infer the appropriate encoding. After that, we use ",
+   A("Pandoc", href="https://pandoc.org/"),
+   " to extract information from the latex files into markdown format. The command we use is",
    D_code(
-       "pandoc -s
-       language="
+       "pandoc <raw_tex_path> -s -o <output_markdown_path> -f latex+raw_tex -t markdown_mmd [--lua-filter <lua_filter_path>]",
+       language="bash",
    ),
-   ".
+   ". Finally, all markdowns were combined to create jsonl files.",
),
P(B("Unique Data Preparation Challenges: ")),
+ P("When converting LaTeX files into Markdown using Pandoc, it is crucial to account for different data formats to minimize information loss while also filtering out noisy content in LaTeX. Below, we outline our considerations and methods for handling various data types during this conversion process:"),
Ul(
    Li(
-       "
+       B("Tables: "),
+       "The process for handling tables follows three main approaches. First, tables compatible with Pandoc’s built-in formats are directly converted into reliable Markdown tables. While table wrappers are removed by default, they can be reintroduced using any desired symbols. Notably, LaTeX’s `\\multicolumn` and `\\multirow` commands are successfully translated into valid Markdown tables. Second, tables unsupported by Pandoc’s native functionality, such as `deluxetable` or other complex LaTeX types, are preserved in their original LaTeX format to maintain the integrity of complex structures. Third, certain tables are converted to HTML web tables. Although the exact conditions for this conversion are unclear, the resulting HTML format is correctly structured.",
        style="margin-bottom: -3px",
    ),
-
+   Li(
+       B("Mathematical Expressions: "),
+       "Inline mathematical expressions are rendered in Markdown using `$...$` or `$$...$$` wrappers. More complex equations remain unchanged and are presented as `\\begin{aligned}` blocks to ensure accuracy and readability.",
+       style="margin-bottom: -3px",
+   ),
+   Li(
+       B("Figures: "),
+       "All figures are removed during the conversion process. Placeholder figures might not contribute to the paper’s data quality and, as such, have been omitted to streamline the output.",
+       style="margin-bottom: -3px",
+   ),
+   Li(
+       B("Section Headers: "),
+       "Section headers are converted into markdown format, using leading `#` symbols to represent the heading levels.",
+       style="margin-bottom: -3px",
+   ),
+   Li(
+       B("References: "),
+       "References are removed. Although they may be informative, references often introduce formatting inconsistencies or add little value compared to the core content of the paper.",
+       style="margin-bottom: -3px",
+   )
+ )
+
+
P(
    B(" Filters Applied: "),
-   "multiple filters are used here after manually verifying output of all the filters as suggested by peS2o dataset",
+   "multiple filters are used here after manually verifying output of all the filters as suggested by peS2o dataset",
+   D_cite(bibtex_key="peS2o"),
),
Ul(
    Li(
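The conversion driver itself is not part of this commit; the hunk above only documents the pandoc command and the chardet-based encoding step. A minimal sketch of that step follows, assuming a hypothetical helper name and a stdin-based pandoc call (pandoc and the chardet package must be available); it is not code from curated.py:

import subprocess

import chardet

def tex_to_markdown(raw_tex_path, output_markdown_path, lua_filter_path=None):
    # Read the raw bytes and guess the encoding with chardet, falling back to UTF-8.
    with open(raw_tex_path, "rb") as f:
        raw = f.read()
    encoding = chardet.detect(raw)["encoding"] or "utf-8"
    text = raw.decode(encoding, errors="replace")

    # Same pandoc invocation as documented in the hunk above:
    # latex+raw_tex -> markdown_mmd, optionally through a Lua filter.
    # Pandoc reads the LaTeX source from stdin here.
    cmd = ["pandoc", "-s", "-o", output_markdown_path,
           "-f", "latex+raw_tex", "-t", "markdown_mmd"]
    if lua_filter_path is not None:
        cmd += ["--lua-filter", lua_filter_path]
    subprocess.run(cmd, input=text, text=True, check=True)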
@@ -851,13 +906,13 @@ filtering_process = Div(
        href="ttps://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_package/",
    ),
    ". PubMed Central (PMC) files are downloaded in an xml.tar format. The tar files are opened and converted to markdown format using pandoc",
-   D_code("pandoc -f jats
-   ". The markdown files are combined to create jsonl files. PubMed Abstract (PMA) files
+   D_code("pandoc <raw_xml_path> -s -o <output_markdown_path> -f jats -t markdown_mmd [--lua-filter <lua_filter_path>]", language="bash"),
+   ". The markdown files are combined to create jsonl files. PubMed Abstract (PMA) files were downloaded in xml. The BeautifulSoup library was used to extract the abstract, title, and PMID. All files were stored in jsonl format.",
),
P(B("Unique Data Preparation Challenges: ")),
Ul(
    Li(
-       "
+       "We tried similar attempts on PMC as we did on ArXiv. The resulted markdown might have slight difference due to the different structure of the XML files.",
        style="margin-bottom: -3px",
    ),
),
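Likewise, the PMA extraction code is not shown in this commit; the hunk above only states that BeautifulSoup was used to pull the abstract, title, and PMID into jsonl. A minimal sketch under those assumptions, using the standard PubMed XML element names (PMID, ArticleTitle, AbstractText) and the lxml-backed "xml" parser; the helper name and output layout are hypothetical:

import json

from bs4 import BeautifulSoup

def pma_to_jsonl(xml_path, jsonl_path):
    # Parse a PubMed Abstract XML file and keep only PMID, title, and abstract,
    # writing one JSON object per article (the jsonl layout described above).
    with open(xml_path, "r", encoding="utf-8") as f:
        soup = BeautifulSoup(f.read(), "xml")  # the "xml" parser requires lxml

    with open(jsonl_path, "w", encoding="utf-8") as out:
        for article in soup.find_all("PubmedArticle"):
            pmid = article.find("PMID")
            title = article.find("ArticleTitle")
            abstract_parts = article.find_all("AbstractText")
            record = {
                "pmid": pmid.get_text(strip=True) if pmid else None,
                "title": title.get_text(strip=True) if title else None,
                # Abstracts can be split into several labelled sections; join them.
                "abstract": " ".join(p.get_text(strip=True) for p in abstract_parts),
            }
            out.write(json.dumps(record, ensure_ascii=False) + "\n")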
@@ -1584,7 +1639,8 @@ def curated():
table_html = data_preparation_steps.to_html(index=False, border=0)
table_div = Div(NotStr(table_html), style="margin: 40px;")

- text = P(
+ text = P(
+     """This initial stage serves as the foundation for the entire
process. Here, we focus on acquiring and extracting the raw data, which can
come from various sources such as crawling websites, using HTTP/FTP dumps,
or working with archive dumps. For instance, to download and prepare a
@@ -1594,7 +1650,8 @@ def curated():
preparation process: It is worth noting that some pipelines might require
invoking additional functions or scripts to handle specific data sources or
formats. These helper scripts can be located within specific directories
- or modules dedicated to the dataset."""
+ or modules dedicated to the dataset."""
+ )

return Div(
    Section(