Commit 9958a63 by albertvillanova (HF staff)
Parent: 05845f9

Replace data URL in SAMSum dataset and support streaming (#4254)


* Use Hub data URL in samsum dataset

* Support streaming samsum dataset

* Clean code

* Update metadata JSON

Commit from https://github.com/huggingface/datasets/commit/19c28817d566dd1f2c0426934184c49413f97d6e
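With the new Hub-hosted URL and the switch to dl_manager.download (see the samsum.py diff below), the dataset becomes loadable in streaming mode. A minimal usage sketch, assuming a datasets release that includes this commit and py7zr installed on the client:

    from datasets import load_dataset

    # Classic mode: the 7z archive is downloaded and extracted up front.
    train = load_dataset("samsum", split="train")

    # Streaming mode, enabled by this commit: examples are read on the fly
    # from the Hub-hosted corpus.7z, with no separate extraction step.
    streamed = load_dataset("samsum", split="train", streaming=True)
    print(next(iter(streamed)))  # {'id': ..., 'dialogue': ..., 'summary': ...}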

Files changed (2):
  1. dataset_infos.json (+1 -1)
  2. samsum.py (+8 -13)
dataset_infos.json CHANGED
@@ -1 +1 @@
-{"samsum": {"description": "\nSAMSum Corpus contains over 16k chat dialogues with manually annotated\nsummaries.\nThere are two features:\n - dialogue: text of dialogue.\n - summary: human written summary of the dialogue.\n - id: id of a example.\n", "citation": "\n@article{gliwa2019samsum,\n title={SAMSum Corpus: A Human-annotated Dialogue Dataset for Abstractive Summarization},\n author={Gliwa, Bogdan and Mochol, Iwona and Biesek, Maciej and Wawer, Aleksander},\n journal={arXiv preprint arXiv:1911.12237},\n year={2019}\n}\n", "homepage": "https://arxiv.org/abs/1911.12237v2", "license": "CC BY-NC-ND 4.0", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "dialogue": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "samsum", "config_name": "samsum", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 9479141, "num_examples": 14732, "dataset_name": "samsum"}, "test": {"name": "test", "num_bytes": 534492, "num_examples": 819, "dataset_name": "samsum"}, "validation": {"name": "validation", "num_bytes": 516431, "num_examples": 818, "dataset_name": "samsum"}}, "download_checksums": {"https://arxiv.org/src/1911.12237v2/anc/corpus.7z": {"num_bytes": 2944100, "checksum": "a97674c66726f66b98a08ca5e8868fb8af9d4843f2b05c4f839bc5cfe91e8899"}}, "download_size": 2944100, "post_processing_size": null, "dataset_size": 10530064, "size_in_bytes": 13474164}}
+{"samsum": {"description": "\nSAMSum Corpus contains over 16k chat dialogues with manually annotated\nsummaries.\nThere are two features:\n - dialogue: text of dialogue.\n - summary: human written summary of the dialogue.\n - id: id of a example.\n", "citation": "\n@article{gliwa2019samsum,\n title={SAMSum Corpus: A Human-annotated Dialogue Dataset for Abstractive Summarization},\n author={Gliwa, Bogdan and Mochol, Iwona and Biesek, Maciej and Wawer, Aleksander},\n journal={arXiv preprint arXiv:1911.12237},\n year={2019}\n}\n", "homepage": "https://arxiv.org/abs/1911.12237", "license": "CC BY-NC-ND 4.0", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "dialogue": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "samsum", "config_name": "samsum", "version": "0.0.0", "splits": {"train": {"name": "train", "num_bytes": 9479141, "num_examples": 14732, "dataset_name": "samsum"}, "test": {"name": "test", "num_bytes": 534492, "num_examples": 819, "dataset_name": "samsum"}, "validation": {"name": "validation", "num_bytes": 516431, "num_examples": 818, "dataset_name": "samsum"}}, "download_checksums": {"https://huggingface.co/datasets/datafiles/samsum/resolve/main/corpus.7z": {"num_bytes": 2944100, "checksum": "a97674c66726f66b98a08ca5e8868fb8af9d4843f2b05c4f839bc5cfe91e8899"}}, "download_size": 2944100, "post_processing_size": null, "dataset_size": 10530064, "size_in_bytes": 13474164}}
samsum.py CHANGED
@@ -40,11 +40,11 @@ There are two features:
  - id: id of a example.
 """
 
-_HOMEPAGE = "https://arxiv.org/abs/1911.12237v2"
+_HOMEPAGE = "https://arxiv.org/abs/1911.12237"
 
 _LICENSE = "CC BY-NC-ND 4.0"
 
-_URLs = "https://arxiv.org/src/1911.12237v2/anc/corpus.7z"
+_URL = "https://huggingface.co/datasets/datafiles/samsum/resolve/main/corpus.7z"
 
 
 class Samsum(datasets.GeneratorBasedBuilder):
@@ -75,12 +75,10 @@ class Samsum(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        my_urls = _URLs
-        path = dl_manager.download_and_extract(my_urls)
+        path = dl_manager.download(_URL)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "filepath": (path, "train.json"),
                     "split": "train",
@@ -88,7 +86,6 @@ class Samsum(datasets.GeneratorBasedBuilder):
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "filepath": (path, "test.json"),
                     "split": "test",
@@ -96,7 +93,6 @@ class Samsum(datasets.GeneratorBasedBuilder):
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
                 gen_kwargs={
                     "filepath": (path, "val.json"),
                     "split": "val",
@@ -106,12 +102,11 @@ class Samsum(datasets.GeneratorBasedBuilder):
 
     def _generate_examples(self, filepath, split):
         """Yields examples."""
-
         path, fname = filepath
-
-        with py7zr.SevenZipFile(path, "r") as z:
-            for name, bio in z.readall().items():
-                if name == fname:
-                    data = json.load(bio)
+        with open(path, "rb") as f:
+            with py7zr.SevenZipFile(f, "r") as z:
+                for name, bio in z.readall().items():
+                    if name == fname:
+                        data = json.load(bio)
         for example in data:
             yield example["id"], example
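Why the new code wraps the path in open() before handing it to py7zr: py7zr.SevenZipFile accepts any binary file-like object, not just a path, and (as far as the datasets streaming machinery goes, an assumption not visible in this diff) in streaming mode the script's open is patched to an fsspec-backed variant that serves the Hub URL as a file object. A standalone sketch of the same read pattern against a local copy of the archive, where the filename "corpus.7z" is illustrative:

    import json
    import py7zr

    # py7zr reads from a binary file object, which is what makes a
    # streaming-mode, HTTP-backed stream interchangeable with a local file.
    with open("corpus.7z", "rb") as f:
        with py7zr.SevenZipFile(f, "r") as z:
            members = z.readall()  # dict: member name -> BytesIO buffer
            data = json.load(members["train.json"])

    print(len(data), "training examples")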