Commit dbd35be · 1 Parent(s): ce1b809
Committed by system (HF staff)

Update files from the datasets library (from 1.8.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.8.0

Files changed (2)
  1. dataset_infos.json +1 -1
  2. roman_urdu.py +2 -0
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"default": {"description": "This is an extensive compilation of Roman Urdu Dataset (Urdu written in Latin/Roman script) tagged for sentiment analysis.\n", "citation": "@InProceedings{Sharf:2018,\ntitle = \"Performing Natural Language Processing on Roman Urdu Datasets\",\nauthors = \"Zareen Sharf and Saif Ur Rahman\",\nbooktitle = \"International Journal of Computer Science and Network Security\",\nvolume = \"18\",\nnumber = \"1\",\npages = \"141-148\",\nyear = \"2018\"\n}\n\n@misc{Dua:2019,\nauthor = \"Dua, Dheeru and Graff, Casey\",\nyear = \"2017\",\ntitle = \"{UCI} Machine Learning Repository\",\nurl = \"http://archive.ics.uci.edu/ml\",\ninstitution = \"University of California, Irvine, School of Information and Computer Sciences\"\n}\n", "homepage": "https://archive.ics.uci.edu/ml/datasets/Roman+Urdu+Data+Set", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "sentiment": {"num_classes": 3, "names": ["Positive", "Negative", "Neutral"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "roman_urdu_dataset", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1633423, "num_examples": 20229, "dataset_name": "roman_urdu_dataset"}}, "download_checksums": {"https://archive.ics.uci.edu/ml/machine-learning-databases/00458/Roman%20Urdu%20DataSet.csv": {"num_bytes": 1628349, "checksum": "d9b54d3685b6ef691cb4a2cc0783fc189dcfa0560cca9afec1099b015c798c7c"}}, "download_size": 1628349, "post_processing_size": null, "dataset_size": 1633423, "size_in_bytes": 3261772}}
+ {"default": {"description": "This is an extensive compilation of Roman Urdu Dataset (Urdu written in Latin/Roman script) tagged for sentiment analysis.\n", "citation": "@InProceedings{Sharf:2018,\ntitle = \"Performing Natural Language Processing on Roman Urdu Datasets\",\nauthors = \"Zareen Sharf and Saif Ur Rahman\",\nbooktitle = \"International Journal of Computer Science and Network Security\",\nvolume = \"18\",\nnumber = \"1\",\npages = \"141-148\",\nyear = \"2018\"\n}\n\n@misc{Dua:2019,\nauthor = \"Dua, Dheeru and Graff, Casey\",\nyear = \"2017\",\ntitle = \"{UCI} Machine Learning Repository\",\nurl = \"http://archive.ics.uci.edu/ml\",\ninstitution = \"University of California, Irvine, School of Information and Computer Sciences\"\n}\n", "homepage": "https://archive.ics.uci.edu/ml/datasets/Roman+Urdu+Data+Set", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "sentiment": {"num_classes": 3, "names": ["Positive", "Negative", "Neutral"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": [{"task": "text-classification", "text_column": "sentence", "label_column": "sentiment", "labels": ["Negative", "Neutral", "Positive"]}], "builder_name": "roman_urdu", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1633423, "num_examples": 20229, "dataset_name": "roman_urdu"}}, "download_checksums": {"https://archive.ics.uci.edu/ml/machine-learning-databases/00458/Roman%20Urdu%20DataSet.csv": {"num_bytes": 1628349, "checksum": "d9b54d3685b6ef691cb4a2cc0783fc189dcfa0560cca9afec1099b015c798c7c"}}, "download_size": 1628349, "post_processing_size": null, "dataset_size": 1633423, "size_in_bytes": 3261772}}
roman_urdu.py CHANGED
@@ -19,6 +19,7 @@ import csv
 import os
 
 import datasets
+from datasets.tasks import TextClassification
 
 
 _CITATION = """\
@@ -67,6 +68,7 @@ class RomanUrdu(datasets.GeneratorBasedBuilder):
             supervised_keys=None,
             homepage=_HOMEPAGE,
             citation=_CITATION,
+            task_templates=[TextClassification(text_column="sentence", label_column="sentiment")],
         )
 
     def _split_generators(self, dl_manager):
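The task_templates argument added here is what lets downstream code cast this dataset into a canonical text-classification schema without knowing its column names. A minimal usage sketch, assuming a datasets version that exposes Dataset.prepare_for_task for applying such templates:

    from datasets import load_dataset

    ds = load_dataset("roman_urdu", split="train")

    # prepare_for_task applies the TextClassification template declared above,
    # renaming/casting the "sentence" and "sentiment" columns into the standard
    # text-classification schema expected by downstream training code.
    clf_ds = ds.prepare_for_task("text-classification")
    print(clf_ds.column_names)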