liyucheng committed on
Commit
02e8cc7
1 Parent(s): 9408f93

Create bbc_alltime.py

Files changed (1)
  1. bbc_alltime.py +108 -0
bbc_alltime.py ADDED
@@ -0,0 +1,108 @@
+import datasets
+import json
+import os
+
+_CITATION = """\
+@misc{li2023estimating,
+      title={Estimating Contamination via Perplexity: Quantifying Memorisation in Language Model Evaluation},
+      author={Yucheng Li},
+      year={2023},
+      eprint={2309.10677},
+      archivePrefix={arXiv},
+      primaryClass={cs.CL}
+}
+"""
+
+_DESCRIPTION = """\
+This dataset contains BBC News articles from 2017 to 2022, arranged by month. Access a specific month by passing "year-month" (without zero padding) as the config name, e.g. load_dataset("RealTimeData/bbc_alltime", "2021-1").
+"""
+
+_HOMEPAGE = "https://github.com/liyucheng09/Contamination_Detector"
+
+_TIMES = ["2017-10", "2017-11", "2017-12", "2017-1", "2017-2", "2017-3", "2017-4", "2017-5", "2017-6", "2017-7", "2017-8", "2017-9", "2018-10", "2018-11", "2018-12", "2018-1", "2018-2", "2018-3", "2018-4", "2018-5", "2018-6", "2018-7", "2018-8", "2018-9", "2019-10", "2019-11", "2019-12", "2019-1", "2019-2", "2019-3", "2019-4", "2019-5", "2019-6", "2019-7", "2019-8", "2019-9", "2020-10", "2020-11", "2020-12", "2020-1", "2020-2", "2020-3", "2020-4", "2020-5", "2020-6", "2020-7", "2020-8", "2020-9", "2021-10", "2021-11", "2021-12", "2021-1", "2021-2", "2021-3", "2021-4", "2021-5", "2021-6", "2021-7", "2021-8", "2021-9", "2022-10", "2022-11", "2022-12", "2022-1", "2022-2", "2022-3", "2022-4", "2022-5", "2022-6", "2022-7", "2022-8", "2022-9", "all"]
+
+
+class Bbc_alltimes(datasets.GeneratorBasedBuilder):
+
+    # One config per month, plus an "all" config covering every month.
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name=time, version=datasets.Version("1.0.0"), description=f"BBC News articles published in the period of {time}"
+        )
+        for time in _TIMES
+    ]
+
+    def _info(self):
+        features = datasets.Features(
+            {
+                "title": datasets.Value("string"),
+                "published_date": datasets.Value("string"),
+                "authors": datasets.Value("string"),
+                "description": datasets.Value("string"),
+                "section": datasets.Value("string"),
+                "content": datasets.Value("string"),
+                "link": datasets.Value("string"),
+            }
+        )
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        """Returns SplitGenerators."""
+        if self.config.name == "all":
+            times = _TIMES[:-1]
+            extracted = dl_manager.download_and_extract('all_articles.zip')
+            # The archive is assumed to extract to one "<year-month>.json" file per month.
+            files = [os.path.join(extracted, f"{time}.json") for time in times]
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={"files": files},
+                )
+            ]
+        else:
+            time = self.config.name
+            _URL = f"articles/{time}.json"
+            file = dl_manager.download(_URL)
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={"files": file},
+                )
+            ]
+
+    def _generate_examples(self, files):
+        """Yields examples."""
+        if self.config.name == "all":
+            assert isinstance(files, list)
+            for file in files:
+                # Use the file name (e.g. "2021-1") to build unique example keys.
+                time = os.path.splitext(os.path.basename(file))[0]
+                with open(file, encoding="utf-8") as f:
+                    data = json.load(f)
+                # Each JSON file holds parallel lists, one entry per article.
+                length = len(data['title'])
+                for i in range(length):
+                    yield f'{time}-{i}', {
+                        "title": data['title'][i],
+                        "published_date": data['published_date'][i],
+                        "authors": data['authors'][i],
+                        "description": data['description'][i],
+                        "section": data['section'][i],
+                        "content": data['content'][i],
+                        "link": data['link'][i],
+                    }
+        else:
+            assert isinstance(files, str)
+            time = self.config.name
+            with open(files, encoding="utf-8") as f:
+                data = json.load(f)
+                length = len(data['title'])
+                for i in range(length):
+                    yield f'{time}-{i}', {
+                        "title": data['title'][i],
+                        "published_date": data['published_date'][i],
+                        "authors": data['authors'][i],
+                        "description": data['description'][i],
+                        "section": data['section'][i],
+                        "content": data['content'][i],
+                        "link": data['link'][i],
+                    }
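
A minimal usage sketch of the configs defined above, assuming the script is hosted as RealTimeData/bbc_alltime (the repo id given in _DESCRIPTION) and the per-month JSON files are in place:

from datasets import load_dataset

# Load a single month; config names follow the "year-month" entries in _TIMES.
# Newer versions of the datasets library may additionally require trust_remote_code=True.
jan_2021 = load_dataset("RealTimeData/bbc_alltime", "2021-1", split="train")
print(jan_2021[0]["title"], jan_2021[0]["published_date"])

# Load every month at once via the "all" config.
all_articles = load_dataset("RealTimeData/bbc_alltime", "all", split="train")

Only a single train split is defined, so split="train" selects everything a config provides.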