Spaces:
Sleeping
Sleeping
victormiller
committed on
Commit
•
d7d3731
1
Parent(s):
6e2a2df
Update results.py
Browse files- results.py +4 -28
results.py
CHANGED
@@ -4,7 +4,10 @@ from fasthtml.components import *
|
|
4 |
intro_div = Div(
|
5 |
H2("Perplexity Evaluation on Duplicate Data"),
|
6 |
H3("Model based Quality Estimation"),
|
7 |
-
|
|
|
|
|
|
|
8 |
)
|
9 |
|
10 |
perp1_div = Div(
|
@@ -39,33 +42,6 @@ def results():
|
|
39 |
Section(
|
40 |
intro_div,
|
41 |
perp1_div,
|
42 |
-
H3("Perplexity vs Years"),
|
43 |
-
perp_v_years,
|
44 |
-
perp_v_years_img,
|
45 |
-
H3("Perplexity vs Document Duplication"),
|
46 |
-
perp_v_docdup,
|
47 |
-
perp_v_docdup_img,
|
48 |
-
H3("Perplexity vs Dump Duplication"),
|
49 |
-
perp_v_dumpdup,
|
50 |
-
perp_v_dumpdup_img,
|
51 |
-
H3("Perplexity vs Local Buckets"),
|
52 |
-
perp_v_localbuck,
|
53 |
-
perp_v_localbuck_img,
|
54 |
-
H3("Perplexity vs Local Dump Duplication"),
|
55 |
-
perp_v_localdump,
|
56 |
-
perp_v_localdump_img,
|
57 |
-
H2("Llama 3.1 8B"),
|
58 |
-
llama_explan,
|
59 |
-
H3("Perplexity vs Buckets"),
|
60 |
-
llama_perp_v_buckets_img,
|
61 |
-
H3("Perplexity vs Years"),
|
62 |
-
llama_perp_v_years_img,
|
63 |
-
H3("Perplexity vs Dump Duplication"),
|
64 |
-
llama_perp_v_dumpdup_img,
|
65 |
-
H3("Perplexity vs Local Buckets"),
|
66 |
-
llama_perp_v_localbuck_img,
|
67 |
-
H3("Perplexity vs Local Dump Duplication"),
|
68 |
-
llama_perp_v_localdump_img,
|
69 |
id="inner-text"
|
70 |
)
|
71 |
)
|
|
|
4 |
intro_div = Div(
|
5 |
H2("Perplexity Evaluation on Duplicate Data"),
|
6 |
H3("Model based Quality Estimation"),
|
7 |
+
P("We took one of the model-based data quality evaluation strategies adopted by [DataComp-LM](https://arxiv.org/abs/2406.11794), which used perplexity filtering as a candidate for quality filtering. DataComp-LM followed [CCNet’s](https://arxiv.org/abs/1911.00359) practice to use a 5-gram Kneser-Ney model as implemented in the [KenLM](https://github.com/kpu/kenlm) library for efficient perplexity calculation. Following this practice, we estimated data quality by taking a KenLM model (from [edugp/kenlm](https://huggingface.co/edugp/kenlm)) trained on English Wikipedia data to compute perplexity on data with different duplication patterns. Lower perplexity is regarded as a signal of higher quality."),
|
8 |
+
H3("Sampling Strategy"),
|
9 |
+
P("We started from a processed Common Crawl (CC) ablation dataset divided by the number of duplicates of each document. For each CC dump, we have different buckets, each holding chunks of documents with different duplicate count ranges (1-1, 2-5, 6-10, 11-100, 101-1000, 1001-30000000). We sampled the first 10k documents from each chunk along with their metadata."),
|
10 |
+
|
11 |
)
|
12 |
|
13 |
perp1_div = Div(
|
|
|
42 |
Section(
|
43 |
intro_div,
|
44 |
perp1_div,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
45 |
id="inner-text"
|
46 |
)
|
47 |
)
|