Commit ba19f49, committed by system (HF staff)
Parent: 64fe1c7

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
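The change itself is mechanical: each logging call that used str.format() is rewritten as an equivalent f-string, with the format specs (:.2f, :4d, :2d) moved inside the braces. A minimal sketch of the equivalence the diff relies on; the URL and timing values below are illustrative, not taken from the script:

    from time import time

    st_time = time() - 3.0                        # pretend the download started ~3 s ago
    f_url = "https://example.com/RS_2019-07.xz"   # illustrative dump URL, not from the script
    elapsed = time() - st_time

    old = "downloading {} {:.2f}".format(f_url, elapsed)  # style removed by this commit
    new = f"downloading {f_url} {elapsed:.2f}"            # style added by this commit
    assert old == new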

Files changed (1): eli5.py (+8, -10)
eli5.py CHANGED

@@ -115,9 +115,9 @@ def _open_compressed_file(f_name, f_type):
 # download a file, extract posts from desired subreddit, then remove from disk
 def _download_and_select_lines(dl_manager, f_url, mode, st_time):
     # download and pre-process original posts
-    logger.info("downloading {} {:.2f}".format(f_url, time() - st_time))
+    logger.info(f"downloading {f_url} {time() - st_time:.2f}")
     f_downloaded_path = dl_manager.download(f_url)
-    logger.info("decompressing and filtering {} {:.2f}".format(f_url, time() - st_time))
+    logger.info(f"decompressing and filtering {f_url} {time() - st_time:.2f}")
     f, fh = _open_compressed_file(f_downloaded_path, f_url.split(".")[-1])
     lines = dict([(name, []) for name in _SUB_REDDITS])
     for line in f:
@@ -130,7 +130,7 @@ def _download_and_select_lines(dl_manager, f_url, mode, st_time):
     os.remove(f_downloaded_path)
     os.remove(f_downloaded_path + ".json")
     os.remove(f_downloaded_path + ".lock")
-    logger.info("tokenizing and selecting {} {:.2f}".format(f_url, time() - st_time))
+    logger.info(f"tokenizing and selecting {f_url} {time() - st_time:.2f}")
     processed_items = dict([(name, []) for name in _SUB_REDDITS])
     if mode == "submissions":
         key_list = ["id", "score", "url", "title", "selftext", "subreddit"]
@@ -146,9 +146,7 @@ def _download_and_select_lines(dl_manager, f_url, mode, st_time):
                 else:
                     reddit_res[k] = line[k]
             processed_items[name] += [reddit_res]
-    logger.info(
-        "Total found {} {} {:.2f}".format(sum([len(ls) for ls in processed_items.values()]), mode, time() - st_time)
-    )
+    logger.info(f"Total found {sum([len(ls) for ls in processed_items.values()])} {mode} {time() - st_time:.2f}")
     return processed_items
 
 
@@ -191,7 +189,7 @@ def _download_and_filter_reddit(dl_manager, start_year=2011, start_month=7, end_
                     for dct in processed_submissions[name]:
                         qa_dict[name][dct["id"]] = dct
             else:
-                logger.info("Could not find submissions dump file for year {:4d} month {:2d}".format(year, month))
+                logger.info(f"Could not find submissions dump file for year {year:4d} month {month:2d}")
     # then all answers
     for year in range(start_year, end_year + 1):
         start_mth = start_month if year == start_year else 1
@@ -210,7 +208,7 @@ def _download_and_filter_reddit(dl_manager, start_year=2011, start_month=7, end_
                             merged_comments += 1
                             qa_dict[name][did]["comments"] = qa_dict[name][did].get("comments", []) + [dct]
             else:
-                logger.info("Could not find comments dump file for year {:4d} month {:2d}".format(year, month))
+                logger.info(f"Could not find comments dump file for year {year:4d} month {month:2d}")
     # then post-process
     res = {}
     for name in _SUB_REDDITS:
@@ -354,7 +352,7 @@ class Eli5(datasets.GeneratorBasedBuilder):
         ]
 
     def _generate_examples(self, split, subreddit_name):
-        logger.info("generating examples from = {}, {} set".format(subreddit_name, split))
+        logger.info(f"generating examples from = {subreddit_name}, {split} set")
         if split in self.data_split.get(subreddit_name, []):
            id_list = self.data_split[subreddit_name][split]
            data = [
@@ -386,7 +384,7 @@ class Eli5(datasets.GeneratorBasedBuilder):
             for i, ans in enumerate(example["comments"]):
                 txt = ans["body"][0]
                 for j, _ in enumerate(ans["body"][1]):
-                    txt = txt.replace("_URL_{}_".format(j), "_URL_{}_".format(map_url_indices[(i, j)]))
+                    txt = txt.replace(f"_URL_{j}_", f"_URL_{map_url_indices[(i, j)]}_")
                 answer_texts += [txt.strip()]
             yield id_, {
                 "q_id": id_,