albertvillanova HF Staff committed on
Commit
64760fd
·
verified ·
1 Parent(s): 3c8cc81

Convert dataset to Parquet (#2)

Browse files

- Convert dataset to Parquet (99c109824ce14ddd085e7d7316beec3833551f0b)
- Delete loading script (0a1a067458ec4e5b870ab301ad289975d332af46)
- Delete legacy dataset_infos.json (dd632434c3db8b7be5e3de5b3e2659ea1fb33949)

README.md ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ dataset_info:
3
+ features:
4
+ - name: text
5
+ dtype: string
6
+ - name: label
7
+ dtype:
8
+ class_label:
9
+ names:
10
+ '0': World
11
+ '1': Sports
12
+ '2': Business
13
+ '3': Sci/Tech
14
+ splits:
15
+ - name: train
16
+ num_bytes: 29817303
17
+ num_examples: 120000
18
+ - name: test
19
+ num_bytes: 1879474
20
+ num_examples: 7600
21
+ download_size: 19820267
22
+ dataset_size: 31696777
23
+ configs:
24
+ - config_name: default
25
+ data_files:
26
+ - split: train
27
+ path: data/train-*
28
+ - split: test
29
+ path: data/test-*
30
+ ---
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f43f50be12dbc50a87a88f7cf78d32ed16651d402bb63b590b105cd2ec54f346
3
+ size 1234829
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b0df9c3bc6b51f5ccb303f8e622b2a7a932b3d21c6f0d38a1e403e7b947d780
3
+ size 18585438
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"default": {"description": "AG is a collection of more than 1 million news articles. News articles have been\ngathered from more than 2000 news sources by ComeToMyHead in more than 1 year of\nactivity. ComeToMyHead is an academic news search engine which has been running\nsince July, 2004. The dataset is provided by the academic comunity for research\npurposes in data mining (clustering, classification, etc), information retrieval\n(ranking, search, etc), xml, data compression, data streaming, and any other\nnon-commercial activity. For more information, please refer to the link\nhttp://www.di.unipi.it/~gulli/AG_corpus_of_news_articles.html .\n\nThe AG's news topic classification dataset is constructed by Xiang Zhang\n(xiang.zhang@nyu.edu) from the dataset above. It is used as a text\nclassification benchmark in the following paper: Xiang Zhang, Junbo Zhao, Yann\nLeCun. Character-level Convolutional Networks for Text Classification. Advances\nin Neural Information Processing Systems 28 (NIPS 2015).\n", "citation": "@inproceedings{Zhang2015CharacterlevelCN,\n title={Character-level Convolutional Networks for Text Classification},\n author={Xiang Zhang and Junbo Jake Zhao and Yann LeCun},\n booktitle={NIPS},\n year={2015}\n}\n", "homepage": "http://groups.di.unipi.it/~gulli/AG_corpus_of_news_articles.html", "license": "", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 4, "names": ["World", "Sports", "Business", "Sci/Tech"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "text-classification", "text_column": "text", "label_column": "label", "labels": ["Business", "Sci/Tech", "Sports", "World"]}], "builder_name": "ag_news", "config_name": "default", "version": {"version_str": "0.0.0", "description": null, "major": 0, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 29817351, "num_examples": 120000, 
"dataset_name": "ag_news"}, "test": {"name": "test", "num_bytes": 1879478, "num_examples": 7600, "dataset_name": "ag_news"}}, "download_checksums": {"https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/train.csv": {"num_bytes": 29470338, "checksum": "76a0a2d2f92b286371fe4d4044640910a04a803fdd2538e0f3f29a5c6f6b672e"}, "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/test.csv": {"num_bytes": 1857427, "checksum": "521465c2428ed7f02f8d6db6ffdd4b5447c1c701962353eb2c40d548c3c85699"}}, "download_size": 31327765, "post_processing_size": null, "dataset_size": 31696829, "size_in_bytes": 63024594}}
 
 
labels.py DELETED
@@ -1,93 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """AG News topic classification dataset."""
18
-
19
-
20
- import csv
21
-
22
- import datasets
23
- from datasets.tasks import TextClassification
24
-
25
-
26
- _DESCRIPTION = """\
27
- AG is a collection of more than 1 million news articles. News articles have been
28
- gathered from more than 2000 news sources by ComeToMyHead in more than 1 year of
29
- activity. ComeToMyHead is an academic news search engine which has been running
30
- since July, 2004. The dataset is provided by the academic comunity for research
31
- purposes in data mining (clustering, classification, etc), information retrieval
32
- (ranking, search, etc), xml, data compression, data streaming, and any other
33
- non-commercial activity. For more information, please refer to the link
34
- http://www.di.unipi.it/~gulli/AG_corpus_of_news_articles.html .
35
- The AG's news topic classification dataset is constructed by Xiang Zhang
36
- (xiang.zhang@nyu.edu) from the dataset above. It is used as a text
37
- classification benchmark in the following paper: Xiang Zhang, Junbo Zhao, Yann
38
- LeCun. Character-level Convolutional Networks for Text Classification. Advances
39
- in Neural Information Processing Systems 28 (NIPS 2015).
40
- """
41
-
42
- _CITATION = """\
43
- @inproceedings{Zhang2015CharacterlevelCN,
44
- title={Character-level Convolutional Networks for Text Classification},
45
- author={Xiang Zhang and Junbo Jake Zhao and Yann LeCun},
46
- booktitle={NIPS},
47
- year={2015}
48
- }
49
- """
50
-
51
- _TRAIN_DOWNLOAD_URL = "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/train.csv"
52
- _TEST_DOWNLOAD_URL = "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/test.csv"
53
-
54
-
55
- class AGNews(datasets.GeneratorBasedBuilder):
56
- """AG News topic classification dataset."""
57
-
58
- def _info(self):
59
- return datasets.DatasetInfo(
60
- description=_DESCRIPTION,
61
- features=datasets.Features(
62
- {
63
- "text": datasets.Value("string"),
64
- "label": datasets.features.ClassLabel(names=["World", "Sports", "Business", "Sci/Tech"]),
65
- }
66
- ),
67
- homepage="http://groups.di.unipi.it/~gulli/AG_corpus_of_news_articles.html",
68
- citation=_CITATION,
69
- task_templates=[TextClassification(text_column="text", label_column="label")],
70
- )
71
-
72
- def _split_generators(self, dl_manager):
73
- train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
74
- test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
75
- return [
76
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
77
- datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
78
- ]
79
-
80
- def _generate_examples(self, filepath):
81
- """Generate AG News examples."""
82
- with open(filepath, encoding="utf-8") as csv_file:
83
- csv_reader = csv.reader(
84
- csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
85
- )
86
- for id_, row in enumerate(csv_reader):
87
- label, title, description = row
88
- # Original labels are [1, 2, 3, 4] ->
89
- # ['World', 'Sports', 'Business', 'Sci/Tech']
90
- # Re-map to [0, 1, 2, 3].
91
- label = int(label) - 1
92
- text = " ".join((title, description))
93
- yield id_, {"text": text, "label": label}