File size: 10,818 Bytes
e137e27
 
f4bc6d2
b4a29fd
f5c7100
b4a29fd
 
 
 
f5c7100
 
 
 
 
 
 
 
 
 
 
 
 
 
42102b3
f5c7100
42102b3
f5c7100
b4a29fd
f5c7100
 
 
42102b3
 
 
 
f5c7100
 
 
42102b3
 
 
 
f5c7100
b4a29fd
 
 
 
f29b166
e137e27
f29b166
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e137e27
f29b166
b87e744
f4bc6d2
feb6fe5
 
 
 
 
 
 
 
 
 
 
 
 
 
f29b166
feb6fe5
 
 
 
 
f29b166
feb6fe5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e137e27
feb6fe5
 
 
 
 
b4a29fd
feb6fe5
 
 
e137e27
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
from fasthtml.common import *
from fasthtml.components import *
import json
from fh_plotly import plotly2fasthtml
from plotly import graph_objects as go
import pandas as pd
import plotly.express as px

# Perplexity Across Different Buckets (global)
# Each DATA entry is [year, [bucket_labels, per_bucket_avg_perplexities]]:
# average KenLM perplexity per duplicate-count bucket for one CC dump year.
DATA = [
    ["2014", [["1-1", "2-5", "6-10", "11-100", "101-1000", "1001-30000000"], [17.410227605477868, 16.11176217183986, 15.632757662414805, 15.446116676532212, 16.716943171826703, 18.156821563322765]]],
    ["2015", [["1-1", "2-5", "6-10", "11-100", "101-1000", "1001-30000000"], [17.446573602753478, 16.14852530113782, 15.627408549576069, 15.0055028132117, 15.565430373421485, 17.314701050452452]]],
    ["2016", [["1-1", "2-5", "6-10", "11-100", "101-1000", "1001-30000000"], [17.307221780905284, 16.297702171159543, 15.948641884223639, 14.799690714225637, 14.935989931859659, 16.09585768919658]]],
    ["2017", [["1-1", "2-5", "6-10", "11-100", "101-1000", "1001-30000000"], [17.338525603992114, 15.960924352297502, 15.912187993988933, 14.822102470001267, 14.778913482337416, 15.428145290012955]]],
    ["2018", [["1-1", "2-5", "6-10", "11-100", "101-1000", "1001-30000000"], [17.08551151136689, 16.187802102106698, 14.935072408852303, 14.832038213200583, 14.508674264491997, 14.800605964649103]]],
    ["2019", [["1-1", "2-5", "6-10", "11-100", "101-1000", "1001-30000000"], [16.818363305107052, 16.474269837858706, 14.944741674400241, 14.568394784374943, 14.690158822673334, 15.990949424635108]]],
    ["2020", [["1-1", "2-5", "6-10", "11-100", "101-1000", "1001-30000000"], [16.98821894111693, 15.936494557783181, 14.79960386342691, 14.435682562274105, 14.58651834886038, 15.869365567783806]]],
    ["2021", [["1-1", "2-5", "6-10", "11-100", "101-1000", "1001-30000000"], [17.125795647512877, 15.780419457145868, 14.631430892394002, 14.276477514399625, 14.337146941773641, 15.872474774329305]]],
    ["2022", [["1-1", "2-5", "6-10", "11-100", "101-1000", "1001-30000000"], [16.573462144306383, 15.283018703313582, 14.378277745163881, 14.0611924390084, 13.9886330091318, 15.769421394877273]]],
    ["2023", [["1-1", "2-5", "6-10", "11-100", "101-1000", "1001-30000000"], [15.4293630385597, 14.608379914730168, 14.118271697056592, 13.880215644749589, 13.767106666731275, 15.05749135510839]]]
]

# Extract ranges (buckets) and years.
# Bucket labels are identical across every year's entry, so take them from
# the first one.
ranges = DATA[0][1][0]
years = [year_data[0] for year_data in DATA]
all_values = [year_data[1][1] for year_data in DATA]

# Create the figure
fig = go.Figure()

# Add one line per year: perplexity as a function of duplicate-count bucket.
# (zip pairs each year with its value row; no index bookkeeping needed.)
for year, values in zip(years, all_values):
    fig.add_trace(go.Scatter(x=ranges, y=values, mode='lines+markers', name=year))

# Update layout
fig.update_layout(
    title="Perplexity Versus Buckets for Different Years",
    xaxis_title="Buckets",
    yaxis_title="Perplexity",
    legend_title="Years",
    hovermode="x unified"
)

# Module-level handle consumed by perp1_div below.
Perplexity_Across_Different_Buckets_global_graph = fig

# NOTE(review): this section originally re-imported plotly.graph_objects and
# redefined DATA with a byte-identical literal. `go` is already imported at
# the top of the file and DATA is defined (identically) above, so both
# duplicates were removed here.

# Extract years and ranges (buckets) from the shared DATA table above.
years = [year_data[0] for year_data in DATA]
ranges = DATA[0][1][0]
all_values = [year_data[1][1] for year_data in DATA]

# Create the figure
fig = go.Figure()

# Add one line per bucket: perplexity as a function of year. The index is
# needed here to pick column i (bucket i's value) out of each year's row.
for i, range_label in enumerate(ranges):
    values = [year_values[i] for year_values in all_values]
    fig.add_trace(go.Scatter(x=years, y=values, mode='lines+markers', name=range_label))

# Update layout
fig.update_layout(
    title="Perplexity over Time by Buckets",
    xaxis_title="Year",
    yaxis_title="Perplexity",
    legend_title="Buckets",
    hovermode="x unified"
)

# Module-level handle consumed by perp1_div below.
Perplexity_Across_Different_years_graph = fig


# Introductory copy for the page: motivation for the KenLM-based perplexity
# study and how the evaluation samples were drawn.
_intro_children = (
    H2("Perplexity Evaluation on Duplicate Data"),
    H3("Model based Quality Estimation"),
    P("We took one of the model-based data quality evaluation strategies adopted by [DataComp-LM](https://arxiv.org/abs/2406.11794), which used perplexity filtering as a candidate for quality filtering. DataComp-LM followed [CCNet’s](https://arxiv.org/abs/1911.00359) practice to use a 5-gram Kneser-Ney model as implemented in the [KenLM](https://github.com/kpu/kenlm) library for efficient perplexity calculation. Following this practice, we estimated data quality by taking a KenLM model (from [edugp/kenlm](https://huggingface.co/edugp/kenlm)) trained on English Wikipedia data to compute perplexity on data with different duplication patterns. Lower perplexity is regarded as a signal of higher quality."),
    H3("Sampling Strategy"),
    P("We started from a processed Common Crawl (CC) ablation dataset divided by the number of duplicates of each document. For each CC dump, we have different buckets each holding chunks of document with different duplicate count ranges (1-1, 2-5, 6-10, 11-100, 101-1000, 1001-30000000). We sampled the first 10k documents from each chunk with their meta data."),
)
intro_div = Div(*_intro_children)

def _perp_section(heading, caption, image_src, graph=None):
    # Build one report section: heading, caption paragraph, static image,
    # plus an optional interactive Plotly figure rendered via fh_plotly.
    children = [
        H3(heading),
        P(caption),
        Img(src=image_src, height="300", width="600"),
    ]
    if graph is not None:
        children.append(plotly2fasthtml(graph))
    return Section(*children)


# Main KenLM perplexity results: one section per view of the data.
perp1_div = Div(
    _perp_section(
        "Perplexity vs Buckets",
        "For each bucket, we aggregated all the chunks that belong to a single year and calculated the average perplexity for each (bucket, year) data point.",
        "images/prep-diff-buckets-global.png",
        Perplexity_Across_Different_Buckets_global_graph,
    ),
    _perp_section(
        "Perplexity vs Years",
        "Taking the same data, we can convert it into a graph indicating the yearly trend. For most buckets, the average perplexity of dumps from more recent years seem to be lower than that of former years.",
        "images/prep-across-diff-year-global-dup-buckets.png",
        Perplexity_Across_Different_years_graph,
    ),
    _perp_section(
        "Perplexity vs Document Duplication",
        "We can also break each bucket into distinct document counts. The graph becomes a bit noisy at the end because of insufficient samples with larger duplication counts.",
        "images/prep-across-diff-docs-dup-count-global.png",
    ),
    _perp_section(
        "Perplexity vs Dump Duplication",
        "We are also interested in how the number of dumps a document is in affect data quality. From the graph below we can see that documents that are duplicated across around 40 - 60 dumps usually have lower perplexity.",
        "images/prep-across-diff-dump-dup-counts-global.png",
    ),
    _perp_section(
        "Perplexity vs Local Buckets",
        "Previously we have seen that documents in recent dumps tend to have lower perplexity. This might be related to the way how global deduplication was implemented. During global deduplication, we only keep copy in the latest dump. Hence documents that are duplicated across multiple dumps only appear in the latest one. To avoid bias brought by this strategy, we tried to recover the states before the global deduplication by reading the metadata attached with each document.",
        "images/prep-across-diff-buckets-local.png",
    ),
    _perp_section(
        "Perplexity vs Local Dump Duplication",
        "Following the same practice, we can plot the local version of the graph of average perplexity with respect to dump duplication.",
        "images/prep-diff-dump-dump-counts-local.png",
    ),
)


def _llama_img_section(heading, image_src):
    # One image-only section of the Llama comparison (heading + static plot).
    return Section(
        H3(heading),
        Img(src=image_src, height="300", width="600"),
    )


# Llama 3.1 8B comparison run.
# NOTE(review): "Perplexity vs Dump Duplication" and "Perplexity vs Local
# Dump Duplication" both point at images/prep-vs-dump-dup-global.png — this
# looks like a copy-paste slip; confirm the intended local-variant filename.
llama_div = Div(
    Section(
        H2("Llama 3.1 8B"),
        P("For comparison purpose, we run the same perplexity evaluation with llama 3.1 8B model.")
    ),
    _llama_img_section("Perplexity vs Buckets", "images/perp-across-diff-buckets-global.png"),
    _llama_img_section("Perplexity vs Years", "images/prep-across-diff-years-global.png"),
    _llama_img_section("Perplexity vs Dump Duplication", "images/prep-vs-dump-dup-global.png"),
    _llama_img_section("Perplexity vs Local Buckets", "images/prep-diff-buckets-local.png"),
    _llama_img_section("Perplexity vs Local Dump Duplication", "images/prep-vs-dump-dup-global.png"),
)


def results():
    """Assemble the full results page.

    Stacks the intro copy, the KenLM perplexity sections, and the Llama 3.1
    8B comparison inside a single Section (id="inner-text") wrapped in a Div.
    """
    body = Section(
        intro_div,
        perp1_div,
        llama_div,
        P("test plotly"),
        id="inner-text",
    )
    return Div(body)