update configs
- configs.txt          +81 -0
- get_configs.py        +1 -1
- wikitext_alltime.py  +13 -6
configs.txt
ADDED
@@ -0,0 +1,81 @@
+2017-1
+2017-10
+2017-11
+2017-12
+2017-2
+2017-3
+2017-4
+2017-5
+2017-6
+2017-7
+2017-8
+2017-9
+2018-1
+2018-10
+2018-11
+2018-12
+2018-2
+2018-3
+2018-4
+2018-5
+2018-6
+2018-7
+2018-8
+2018-9
+2019-1
+2019-10
+2019-11
+2019-12
+2019-2
+2019-3
+2019-4
+2019-5
+2019-6
+2019-7
+2019-8
+2019-9
+2020-1
+2020-10
+2020-11
+2020-12
+2020-2
+2020-3
+2020-4
+2020-5
+2020-6
+2020-7
+2020-8
+2020-9
+2021-1
+2021-10
+2021-11
+2021-12
+2021-2
+2021-3
+2021-4
+2021-5
+2021-6
+2021-7
+2021-8
+2021-9
+2022-1
+2022-10
+2022-11
+2022-12
+2022-2
+2022-3
+2022-4
+2022-5
+2022-6
+2022-7
+2022-8
+2022-9
+2023-1
+2023-2
+2023-3
+2023-4
+2023-5
+2023-6
+2023-7
+2023-8
+2023-9
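Note that the 81 config names above are sorted lexicographically, not chronologically: "2017-1" is followed by "2017-10", "2017-11", and "2017-12" before "2017-2", because get_configs.py sorts the "YYYY-M" strings as text. Where chronological order matters downstream, a minimal sketch of a numeric sort key (the chronological() helper is hypothetical, not part of this commit):

# Hypothetical helper: order "YYYY-M" config names by (year, month)
# as integers rather than by string comparison.
def chronological(configs):
    return sorted(configs, key=lambda c: tuple(int(p) for p in c.split('-')))

with open('configs.txt', encoding='utf-8') as f:
    names = f.read().splitlines()

print(chronological(names)[:3])  # ['2017-1', '2017-2', '2017-3']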
get_configs.py
CHANGED
@@ -3,7 +3,7 @@ import os
 
 all_articles = glob.glob('./wiki/*.json')
 file_names = [os.path.splitext(os.path.basename(path))[0] for path in all_articles]
-_TIMES = sorted(file_names)
+_TIMES = sorted(file_names)
 
 # Write the string to a Python file
 with open('configs.txt', 'w') as file:
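The hunk shows the script only from line 3 and cuts off inside the with block (the removed and re-added _TIMES line render identically here, so the underlying change is presumably whitespace-only and is not recoverable from this page). For context, a minimal sketch of the whole generator, assuming the truncated body simply writes one config name per line; the final write call is an assumption, not shown in the diff:

import glob
import os

# Each ./wiki/<YYYY-M>.json snapshot becomes one config name.
all_articles = glob.glob('./wiki/*.json')
file_names = [os.path.splitext(os.path.basename(path))[0] for path in all_articles]
_TIMES = sorted(file_names)

# Write the string to a Python file
with open('configs.txt', 'w') as file:
    file.write('\n'.join(_TIMES))  # assumption: one name per line, not shown in the hunk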
wikitext_alltime.py
CHANGED
@@ -1,6 +1,14 @@
 import datasets
 import json
 
+dl = datasets.DownloadManager()
+configs_file = dl.download('https://huggingface.co/datasets/RealTimeData/wikitext_alltime/raw/main/configs.txt')
+
+with open(configs_file, encoding="utf-8") as f:
+    _TIMES = f.read().splitlines()
+
+_TIMES += ['all']
+
 _CITATION = """\
 @misc{li2023estimating,
     title={Estimating Contamination via Perplexity: Quantifying Memorisation in Language Model Evaluation},
@@ -13,14 +21,11 @@ _CITATION = """\
 """
 
 _DESCRIPTION = """\
-This dataset contains Wikipedia articles of 419 selected pages from 2017 to
+This dataset contains Wikipedia articles of 419 selected pages every month from 2017-1 to current. The articles are arranged by month. Access a specific month by passing "YYYY-M" as the config name, e.g. load_dataset("RealTimeData/wikitext_alltime", "2021-1").
 """
 
 _HOMEPAGE = "https://github.com/liyucheng09/Contamination_Detector"
 
-_TIMES = ["2017-10", "2017-11", "2017-12", "2017-1", "2017-2", "2017-3", "2017-4", "2017-5", "2017-6", "2017-7", "2017-8", "2017-9", "2018-10", "2018-11", "2018-12", "2018-1", "2018-2", "2018-3", "2018-4", "2018-5", "2018-6", "2018-7", "2018-8", "2018-9", "2019-10", "2019-11", "2019-12", "2019-1", "2019-2", "2019-3", "2019-4", "2019-5", "2019-6", "2019-7", "2019-8", "2019-9", "2020-10", "2020-11", "2020-12", "2020-1", "2020-2", "2020-3", "2020-4", "2020-5", "2020-6", "2020-7", "2020-8", "2020-9", "2021-10", "2021-11", "2021-12", "2021-1", "2021-2", "2021-3", "2021-4", "2021-5", "2021-6", "2021-7", "2021-8", "2021-9", "2022-10", "2022-11", "2022-12", "2023-1", "2023-2", "2023-3", "2023-4", "2023-5", "2023-6", "2023-7", "2023-8", "2023-9", "2022-1", "2022-2", "2022-3", "2022-4", "2022-5", "2022-6", "2022-7", "2022-8", "2022-9", "all"]
-
-
 class Wikitext_alltimes(datasets.GeneratorBasedBuilder):
 
     BUILDER_CONFIGS = [
@@ -49,7 +54,7 @@ class Wikitext_alltimes(datasets.GeneratorBasedBuilder):
         """Returns SplitGenerators."""
         if self.config.name == "all":
            times = _TIMES[:-1]
-            files = dl_manager.
+            files = dl_manager.download([f"articles/{time}.json" for time in _TIMES ])
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
@@ -58,7 +63,7 @@ class Wikitext_alltimes(datasets.GeneratorBasedBuilder):
            ]
        else:
            time = self.config.name
-            _URL = f"
+            _URL = f"articles/{time}.json"
            file = dl_manager.download(_URL)
            return [
                datasets.SplitGenerator(
@@ -80,6 +85,7 @@ class Wikitext_alltimes(datasets.GeneratorBasedBuilder):
                        "title": article['title'],
                        "pageid": article['pageid'],
                        "text": article['text'],
+                        "time": time,
                    }
        else:
            assert isinstance(files, str)
@@ -91,4 +97,5 @@ class Wikitext_alltimes(datasets.GeneratorBasedBuilder):
                        "title": article['title'],
                        "pageid": article['pageid'],
                        "text": article['text'],
+                        "time": time,
                    }
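With this change the loader discovers its configs at import time from configs.txt instead of a hard-coded _TIMES list, so adding a new month only requires appending a line to configs.txt. A usage sketch; the config names and the new "time" field come straight from the diff above:

from datasets import load_dataset

# Load one month's snapshot of the 419 tracked pages.
ds = load_dataset("RealTimeData/wikitext_alltime", "2021-1", split="train")
print(ds[0]["title"], ds[0]["time"])  # every example now carries its month

# Or load every month into a single train split via the "all" config.
full = load_dataset("RealTimeData/wikitext_alltime", "all", split="train")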