jxm committed
Commit: a49c3a2
Parent: 2b01fc3

Update link in wiki_bio dataset (#3651)


* update link in wiki_bio dataset

* run linter and update dummy data

* fix markdown so that test passes (even though I didn't break it)

Commit from https://github.com/huggingface/datasets/commit/ffc35f41b41d6be6e067934b680f7abda2e91d49
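Since the change swaps the Google Drive download for a zip hosted on the Hugging Face Hub, a quick smoke test is to load the dataset through the updated loader. This is a minimal sketch, assuming the `datasets` library is installed; the field names follow the schema recorded in `dataset_infos.json` below:

```python
from datasets import load_dataset

# After this commit the loader fetches wikipedia-biography-dataset.zip
# from the Hub instead of the old Google Drive link.
ds = load_dataset("wiki_bio", split="train")

example = ds[0]
print(example["input_text"]["context"])                      # context string for the article
print(example["input_text"]["table"]["column_header"][:5])   # first few infobox field names
print(example["target_text"][:200])                          # start of the first paragraph
```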

README.md CHANGED
@@ -52,7 +52,7 @@ pretty_name: WikiBio
 
  - **Repository:** https://github.com/DavidGrangier/wikipedia-biography-dataset
  - **Paper:** https://arxiv.org/pdf/1603.07771.pdf
- - **GoogleDrive:** https://drive.google.com/uc?export=download&id=1L7aoUXzHPzyzQ0ns4ApBbYepsjFOtXil
+ - **GitHub:** https://github.com/DavidGrangier/wikipedia-biography-dataset
 
 ### Dataset Summary
 
@@ -69,7 +69,7 @@ English.
 
 ### Data Instances
 
- [More Information Needed]
+ More Information Needed
 
 ### Data Fields
 
@@ -132,7 +132,7 @@ where, in the `"table"` field, all the information of the Wikpedia infobox is st
 [More Information Needed]
 
 ### Source Data
- This dataset was announced in the paper <em>Neural Text Generation from Structured Data with Application to the Biography Domain</em> [(arxiv link)](https://arxiv.org/pdf/1603.07771.pdf) and is stored both in [this](https://github.com/DavidGrangier/wikipedia-biography-dataset) repo (owned by DavidGrangier) and in [Google Drive](https://drive.google.com/uc?export=download&id=1L7aoUXzHPzyzQ0ns4ApBbYepsjFOtXil) (zipped and mantained by the TensorFlow team).
+ This dataset was announced in the paper <em>Neural Text Generation from Structured Data with Application to the Biography Domain</em> [(arxiv link)](https://arxiv.org/pdf/1603.07771.pdf) and is stored in [this](https://github.com/DavidGrangier/wikipedia-biography-dataset) repo (owned by DavidGrangier).
 #### Initial Data Collection and Normalization
 
 [More Information Needed]
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"default": {"description": "This dataset gathers 728,321 biographies from wikipedia. It aims at evaluating text generation\nalgorithms. For each article, we provide the first paragraph and the infobox (both tokenized).\nFor each article, we extracted the first paragraph (text), the infobox (structured data). Each\ninfobox is encoded as a list of (field name, field value) pairs. We used Stanford CoreNLP\n(http://stanfordnlp.github.io/CoreNLP/) to preprocess the data, i.e. we broke the text into\nsentences and tokenized both the text and the field values. The dataset was randomly split in\nthree subsets train (80%), valid (10%), test (10%).\n", "citation": "@article{DBLP:journals/corr/LebretGA16,\n author = {R{'{e}}mi Lebret and\n David Grangier and\n Michael Auli},\n title = {Generating Text from Structured Data with Application to the Biography\n Domain},\n journal = {CoRR},\n volume = {abs/1603.07771},\n year = {2016},\n url = {http://arxiv.org/abs/1603.07771},\n archivePrefix = {arXiv},\n eprint = {1603.07771},\n timestamp = {Mon, 13 Aug 2018 16:48:30 +0200},\n biburl = {https://dblp.org/rec/journals/corr/LebretGA16.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n", "homepage": "https://github.com/DavidGrangier/wikipedia-biography-dataset", "license": "CC BY-SA 3.0", "features": {"input_text": {"table": {"feature": {"column_header": {"dtype": "string", "id": null, "_type": "Value"}, "row_number": {"dtype": "int16", "id": null, "_type": "Value"}, "content": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "context": {"dtype": "string", "id": null, "_type": "Value"}}, "target_text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "input_text", "output": "target_text"}, "builder_name": "wiki_bio", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 618362475, "num_examples": 582659, "dataset_name": "wiki_bio"}, "test": {"name": "test", "num_bytes": 77151324, "num_examples": 72831, "dataset_name": "wiki_bio"}, "val": {"name": "val", "num_bytes": 77221530, "num_examples": 72831, "dataset_name": "wiki_bio"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1L7aoUXzHPzyzQ0ns4ApBbYepsjFOtXil": {"num_bytes": 333998704, "checksum": "0de0fef4cc6c9182138939134b81b6ac33ffbc989b6d23a2d9ef1e50c49b8032"}}, "download_size": 333998704, "post_processing_size": null, "dataset_size": 772735329, "size_in_bytes": 1106734033}}
+ {"default": {"description": "This dataset gathers 728,321 biographies from wikipedia. It aims at evaluating text generation\nalgorithms. For each article, we provide the first paragraph and the infobox (both tokenized).\nFor each article, we extracted the first paragraph (text), the infobox (structured data). Each\ninfobox is encoded as a list of (field name, field value) pairs. We used Stanford CoreNLP\n(http://stanfordnlp.github.io/CoreNLP/) to preprocess the data, i.e. we broke the text into\nsentences and tokenized both the text and the field values. The dataset was randomly split in\nthree subsets train (80%), valid (10%), test (10%).\n", "citation": "@article{DBLP:journals/corr/LebretGA16,\n author = {R{'{e}}mi Lebret and\n David Grangier and\n Michael Auli},\n title = {Generating Text from Structured Data with Application to the Biography\n Domain},\n journal = {CoRR},\n volume = {abs/1603.07771},\n year = {2016},\n url = {http://arxiv.org/abs/1603.07771},\n archivePrefix = {arXiv},\n eprint = {1603.07771},\n timestamp = {Mon, 13 Aug 2018 16:48:30 +0200},\n biburl = {https://dblp.org/rec/journals/corr/LebretGA16.bib},\n bibsource = {dblp computer science bibliography, https://dblp.org}\n}\n", "homepage": "https://github.com/DavidGrangier/wikipedia-biography-dataset", "license": "CC BY-SA 3.0", "features": {"input_text": {"table": {"feature": {"column_header": {"dtype": "string", "id": null, "_type": "Value"}, "row_number": {"dtype": "int16", "id": null, "_type": "Value"}, "content": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "context": {"dtype": "string", "id": null, "_type": "Value"}}, "target_text": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "input_text", "output": "target_text"}, "task_templates": null, "builder_name": "wiki_bio", "config_name": "default", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 2, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 619269257, "num_examples": 582659, "dataset_name": "wiki_bio"}, "test": {"name": "test", "num_bytes": 77264695, "num_examples": 72831, "dataset_name": "wiki_bio"}, "val": {"name": "val", "num_bytes": 77335069, "num_examples": 72831, "dataset_name": "wiki_bio"}}, "download_checksums": {"https://huggingface.co/datasets/wiki_bio/resolve/main/data/wikipedia-biography-dataset.zip": {"num_bytes": 333998704, "checksum": "0de0fef4cc6c9182138939134b81b6ac33ffbc989b6d23a2d9ef1e50c49b8032"}}, "download_size": 333998704, "post_processing_size": null, "dataset_size": 773869021, "size_in_bytes": 1107867725}}
dummy/{1.1.0 → 1.2.0}/dummy_data.zip RENAMED
File without changes
wiki_bio.py CHANGED
@@ -56,9 +56,7 @@ _HOMEPAGE = "https://github.com/DavidGrangier/wikipedia-biography-dataset"
 
 _LICENSE = "CC BY-SA 3.0"
 
- # The HuggingFace dataset library don't host the datasets but only point to the original files
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
- _URL = "https://drive.google.com/uc?export=download&id=1L7aoUXzHPzyzQ0ns4ApBbYepsjFOtXil"
+ _URL = "https://huggingface.co/datasets/wiki_bio/resolve/main/data/wikipedia-biography-dataset.zip"
 
 
 def _get_table(infobox_line):
@@ -92,7 +90,7 @@ def _get_table(infobox_line):
 class WikiBio(datasets.GeneratorBasedBuilder):
     """Infoboxes and first paragraph from Wikipedia biography pages."""
 
-     VERSION = datasets.Version("1.1.0")
+     VERSION = datasets.Version("1.2.0")
 
     def _info(self):
         features = datasets.Features(
@@ -121,8 +119,7 @@ class WikiBio(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-         my_urls = _URL
-         data_dir = dl_manager.download_and_extract(my_urls)
+         data_dir = dl_manager.download_and_extract(_URL)
         data_path = os.path.join(data_dir, "wikipedia-biography-dataset")
         return [
             datasets.SplitGenerator(
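Because the recorded checksum is unchanged, one way to sanity-check the newly hosted zip is to hash it and compare against the value stored under `download_checksums`. This is a minimal sketch using only the standard library; it assumes network access and that the recorded digest is a sha256 hash, and it streams the roughly 334 MB file in chunks:

```python
import hashlib
import urllib.request

# New download location introduced by this commit (same value as _URL in wiki_bio.py).
URL = "https://huggingface.co/datasets/wiki_bio/resolve/main/data/wikipedia-biography-dataset.zip"
# Digest recorded under download_checksums in dataset_infos.json (assumed sha256).
EXPECTED = "0de0fef4cc6c9182138939134b81b6ac33ffbc989b6d23a2d9ef1e50c49b8032"

digest = hashlib.sha256()
with urllib.request.urlopen(URL) as response:
    for chunk in iter(lambda: response.read(1 << 20), b""):  # read 1 MiB at a time
        digest.update(chunk)

print("checksum matches:", digest.hexdigest() == EXPECTED)
```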