rajeshradhakrishnan committed on
Commit
ce6b3c6
1 Parent(s): f0280d6

first draft of loading script for ml only

Files changed (3)
  1. README.md +28 -0
  2. dataset_infos.json +81 -0
  3. malayalam_news.py +82 -0
README.md ADDED
@@ -0,0 +1,28 @@
+ ## IndicNLP News Article Classification Dataset
+
+ We used the IndicNLP text corpora to create classification datasets comprising news articles and their categories for 9 languages. The datasets are balanced across classes. The following table contains the statistics of our datasets:
+
+ | Language  | Classes                                      | Articles per Class |
+ | --------- | -------------------------------------------- | ------------------ |
+ | Bengali   | entertainment, sports                        | 7K                 |
+ | Gujarati  | business, entertainment, sports              | 680                |
+ | Kannada   | entertainment, lifestyle, sports             | 10K                |
+ | Malayalam | business, entertainment, sports, technology  | 1.5K               |
+ | Marathi   | entertainment, lifestyle, sports             | 1.5K               |
+ | Oriya     | business, crime, entertainment, sports       | 7.5K               |
+ | Punjabi   | business, entertainment, sports, politics    | 780                |
+ | Tamil     | entertainment, politics, sport               | 3.9K               |
+ | Telugu    | entertainment, business, sports              | 8K                 |
+
+ ## Citing
+
+ If you are using any of the resources, please cite the following article:
+
+ ```
+ @article{kunchukuttan2020indicnlpcorpus,
+     title={AI4Bharat-IndicNLP Corpus: Monolingual Corpora and Word Embeddings for Indic Languages},
+     author={Anoop Kunchukuttan and Divyanshu Kakwani and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
+     year={2020},
+     journal={arXiv preprint arXiv:2005.00085},
+ }
+ ```
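For reference, a minimal usage sketch for the Malayalam dataset this README describes, assuming the `datasets` library. The Hub repository id below is an assumption inferred from the committer name and the `malayalam_news` builder name; it is not stated in this commit.

```python
from datasets import load_dataset

# Hypothetical repository id; adjust to wherever this dataset is actually published.
dataset = load_dataset("rajeshradhakrishnan/malayalam_news")

# The "label" feature is a ClassLabel, so integer ids can be mapped back to names.
example = dataset["train"][0]
label_feature = dataset["train"].features["label"]
print(example["text"][:100])
print(label_feature.int2str(example["label"]))
```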
dataset_infos.json ADDED
@@ -0,0 +1,81 @@
+ {
+     "default": {
+         "description": "The AI4Bharat-IndicNLP dataset is an ongoing effort to create a collection of large-scale, general-domain corpora \n for Indian languages. Currently, it contains 2.7 billion words for 10 Indian languages from two language families. We share \n pre-trained word embeddings trained on these corpora. We create news article category classification datasets for 9 \n languages to evaluate the embeddings. We evaluate the IndicNLP embeddings on multiple evaluation tasks.\n",
+         "citation": "@article{kunchukuttan2020indicnlpcorpus,\n title={AI4Bharat-IndicNLP Corpus: Monolingual Corpora and Word Embeddings for Indic Languages},\n author={Anoop Kunchukuttan and Divyanshu Kakwani and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n journal={arXiv preprint arXiv:2005.00085},\n year={2020}\n}\n",
+         "homepage": "https://github.com/AI4Bharat/indicnlp_corpus#indicnlp-news-article-classification-dataset",
+         "license": "",
+         "features": {
+             "text": {
+                 "dtype": "string",
+                 "id": null,
+                 "_type": "Value"
+             },
+             "label": {
+                 "num_classes": 4,
+                 "names": [
+                     "business",
+                     "entertainment",
+                     "sports",
+                     "technology"
+                 ],
+                 "names_file": null,
+                 "id": null,
+                 "_type": "ClassLabel"
+             }
+         },
+         "post_processed": null,
+         "supervised_keys": null,
+         "task_templates": [
+             {
+                 "task": "text-classification",
+                 "text_column": "text",
+                 "label_column": "label",
+                 "labels": [
+                     "business",
+                     "entertainment",
+                     "sports",
+                     "technology"
+                 ]
+             }
+         ],
+         "builder_name": "malayalam_news",
+         "config_name": "default",
+         "version": {
+             "version_str": "0.0.0",
+             "description": null,
+             "major": 0,
+             "minor": 0,
+             "patch": 0
+         },
+         "splits": {
+             "train": {
+                 "name": "train",
+                 "num_bytes": 29817351,
+                 "num_examples": 120000,
+                 "dataset_name": "malayalam_news"
+             },
+             "validation": {
+                 "name": "validation",
+                 "num_bytes": 29817351,
+                 "num_examples": 120000,
+                 "dataset_name": "malayalam_news"
+             },
+             "test": {
+                 "name": "test",
+                 "num_bytes": 1879478,
+                 "num_examples": 7600,
+                 "dataset_name": "malayalam_news"
+             }
+         },
+         "download_checksums": {
+             "https://storage.googleapis.com/ai4bharat-public-indic-nlp-corpora/evaluations/classification/indicnlp-news-articles.tgz": {
+                 "num_bytes": null,
+                 "checksum": ""
+             }
+         },
+         "download_size": null,
+         "post_processing_size": null,
+         "dataset_size": null,
+         "size_in_bytes": null
+     }
+ }
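The `features` and `task_templates` entries above are the serialized form of `datasets` schema objects. A small sketch of the equivalent in-code schema, showing how the four class names map to the integer label ids stored with each example; only the class names are taken from the file, the rest is illustrative.

```python
import datasets

# Schema equivalent to the "features" block in dataset_infos.json.
features = datasets.Features(
    {
        "text": datasets.Value("string"),
        "label": datasets.ClassLabel(names=["business", "entertainment", "sports", "technology"]),
    }
)

# ClassLabel converts between class names and the integer ids stored on disk.
label = features["label"]
print(label.num_classes)            # 4
print(label.str2int("technology"))  # 3
print(label.int2str(0))             # "business"
```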
malayalam_news.py ADDED
@@ -0,0 +1,82 @@
+ import csv
+ import os
+
+ import datasets
+ from datasets.tasks import TextClassification
+
+ _DESCRIPTION = """\
+ The AI4Bharat-IndicNLP dataset is an ongoing effort to create a collection of large-scale,
+ general-domain corpora for Indian languages. Currently, it contains 2.7 billion words for 10 Indian languages from two language families.
+ We share pre-trained word embeddings trained on these corpora.
+ We create news article category classification datasets for 9 languages to evaluate the embeddings.
+ We evaluate the IndicNLP embeddings on multiple evaluation tasks.
+ """
+
+ _CITATION = """\
+ @article{kunchukuttan2020indicnlpcorpus,
+     title={AI4Bharat-IndicNLP Corpus: Monolingual Corpora and Word Embeddings for Indic Languages},
+     author={Anoop Kunchukuttan and Divyanshu Kakwani and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
+     year={2020},
+     journal={arXiv preprint arXiv:2005.00085},
+ }
+ """
+
+ _DOWNLOAD_URL = "https://storage.googleapis.com/ai4bharat-public-indic-nlp-corpora/evaluations/classification/indicnlp-news-articles.tgz"
+
+
+ class MalayalamNews(datasets.GeneratorBasedBuilder):
+     """Malayalam News topic classification dataset."""
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "text": datasets.Value("string"),
+                     "label": datasets.features.ClassLabel(names=["business", "entertainment", "sports", "technology"]),
+                 }
+             ),
+             homepage="https://github.com/AI4Bharat/indicnlp_corpus#indicnlp-news-article-classification-dataset",
+             citation=_CITATION,
+             task_templates=[TextClassification(text_column="text", label_column="label")],
+         )
+
+     def _split_generators(self, dl_manager):
+         data_dir = dl_manager.download_and_extract(_DOWNLOAD_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "indicnlp-news-articles", "ml", "ml-train.csv"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "indicnlp-news-articles", "ml", "ml-valid.csv"),
+                     "split": "validation",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "indicnlp-news-articles", "ml", "ml-test.csv"),
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         """Generate Malayalam News examples."""
+         # `split` is passed through gen_kwargs above but is not needed here.
+         with open(filepath, encoding="utf-8") as csv_file:
+             csv_reader = csv.reader(
+                 csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
+             )
+             for id_, row in enumerate(csv_reader):
+                 # Each row is expected to hold the category name followed by the
+                 # article text; the category strings match the ClassLabel names
+                 # declared in _info(), so they can be yielded directly.
+                 label, text = row
+                 yield id_, {"text": text, "label": label}
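A local smoke-test sketch for the loading script above, assuming `malayalam_news.py` sits in the working directory and a `datasets` version that supports loading from a local script. Printing per-split row counts is a quick way to compare against the sizes recorded in dataset_infos.json.

```python
from datasets import load_dataset

# Load directly from the local loading script rather than the Hub.
dataset = load_dataset("./malayalam_news.py")

# Row counts per split, for sanity-checking against dataset_infos.json.
for split_name, split in dataset.items():
    print(split_name, split.num_rows)

# Peek at one example and resolve its label id to a class name.
example = dataset["train"][0]
label_feature = dataset["train"].features["label"]
print(label_feature.int2str(example["label"]), example["text"][:80])
```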