Datasets:
Update files from the datasets library (from 1.4.0)
Browse files. Release notes: https://github.com/huggingface/datasets/releases/tag/1.4.0
eli5.py
CHANGED
@@ -20,7 +20,6 @@ from __future__ import absolute_import, division, print_function
|
|
20 |
import bz2
|
21 |
import io
|
22 |
import json
|
23 |
-
import logging
|
24 |
import lzma
|
25 |
import os
|
26 |
import re
|
@@ -31,6 +30,9 @@ from time import time
|
|
31 |
import datasets
|
32 |
|
33 |
|
|
|
|
|
|
|
34 |
_SUB_REDDITS = ["explainlikeimfive", "askscience", "AskHistorians"]
|
35 |
_REDDIT_URL = "https://files.pushshift.io/reddit/"
|
36 |
|
@@ -300,13 +302,13 @@ class Eli5(datasets.GeneratorBasedBuilder):
|
|
300 |
self._cache_dir_root, self._relative_data_dir(with_version=False), "reddit_downloaded_qa_lists.json"
|
301 |
)
|
302 |
if isfile(qa_data_file):
|
303 |
-
|
304 |
self.filtered_reddit = json.load(open(qa_data_file))
|
305 |
else:
|
306 |
self.filtered_reddit = _download_and_filter_reddit(
|
307 |
dl_manager, start_year=2011, start_month=7, end_year=2019, end_month=7
|
308 |
)
|
309 |
-
|
310 |
json.dump(self.filtered_reddit, open(qa_data_file, "w"))
|
311 |
# download data splits from AWS
|
312 |
fpath_splits = dl_manager.download(self._DATA_SPLIT_URL)
|
@@ -351,7 +353,7 @@ class Eli5(datasets.GeneratorBasedBuilder):
|
|
351 |
]
|
352 |
|
353 |
def _generate_examples(self, split, subreddit_name):
|
354 |
-
|
355 |
if split in self.data_split.get(subreddit_name, []):
|
356 |
id_list = self.data_split[subreddit_name][split]
|
357 |
data = [
|
|
|
20 |
import bz2
|
21 |
import io
|
22 |
import json
|
|
|
23 |
import lzma
|
24 |
import os
|
25 |
import re
|
|
|
30 |
import datasets
|
31 |
|
32 |
|
33 |
+
logger = datasets.logging.get_logger(__name__)
|
34 |
+
|
35 |
+
|
36 |
_SUB_REDDITS = ["explainlikeimfive", "askscience", "AskHistorians"]
|
37 |
_REDDIT_URL = "https://files.pushshift.io/reddit/"
|
38 |
|
|
|
302 |
self._cache_dir_root, self._relative_data_dir(with_version=False), "reddit_downloaded_qa_lists.json"
|
303 |
)
|
304 |
if isfile(qa_data_file):
|
305 |
+
logger.info("loading pre-computed QA list")
|
306 |
self.filtered_reddit = json.load(open(qa_data_file))
|
307 |
else:
|
308 |
self.filtered_reddit = _download_and_filter_reddit(
|
309 |
dl_manager, start_year=2011, start_month=7, end_year=2019, end_month=7
|
310 |
)
|
311 |
+
logger.info("saving pre-computed QA list")
|
312 |
json.dump(self.filtered_reddit, open(qa_data_file, "w"))
|
313 |
# download data splits from AWS
|
314 |
fpath_splits = dl_manager.download(self._DATA_SPLIT_URL)
|
|
|
353 |
]
|
354 |
|
355 |
def _generate_examples(self, split, subreddit_name):
|
356 |
+
logger.info("generating examples from = {}, {} set".format(subreddit_name, split))
|
357 |
if split in self.data_split.get(subreddit_name, []):
|
358 |
id_list = self.data_split[subreddit_name][split]
|
359 |
data = [
|