Dataset: roman_urdu_hate_speech
Tasks: Text Classification
Modalities: Text
Formats: parquet
Sub-tasks: multi-class-classification
Languages: Urdu
Size: 10K - 100K
Tags: binary classification
License: mit
Commit • d1e9fb2 • Parent(s): dbe7278

Convert dataset to Parquet (#4)

- Convert dataset to Parquet (cb7341cdb8eef191bb9dbcabdf8556be9123d096)
- Add 'Fine_Grained' config data files (d5f0059b8dffd815dbabcffc64258bdff068f9ff)
- Delete loading script (dcd3eae3caafb307d7edacb56da4db20b0fe6bcd)
Files changed:
- Coarse_Grained/test-00000-of-00001.parquet +3 -0
- Coarse_Grained/train-00000-of-00001.parquet +3 -0
- Coarse_Grained/validation-00000-of-00001.parquet +3 -0
- Fine_Grained/test-00000-of-00001.parquet +3 -0
- Fine_Grained/train-00000-of-00001.parquet +3 -0
- Fine_Grained/validation-00000-of-00001.parquet +3 -0
- README.md +28 -10
- roman_urdu_hate_speech.py +0 -210
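With the data stored as Parquet and declared in the dataset card's new configs: block, the dataset loads without any loading script. A minimal sketch of loading it after this commit (the repo id roman_urdu_hate_speech is assumed from the dataset card title):

    # Sketch: load the Parquet-backed dataset; repo id assumed from the card title.
    from datasets import load_dataset

    # "Coarse_Grained" is the default config, per `default: true` in the card.
    coarse = load_dataset("roman_urdu_hate_speech", "Coarse_Grained")
    print(coarse)  # DatasetDict with train/test/validation splits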
Coarse_Grained/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5e07f4849ff9ecd0c216844626fefd1c3c4afd1439bd0b16dcb914714f1f641
+size 147029
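Each of these new files is a Git LFS pointer: the repository stores only the version/oid/size triplet, and the actual Parquet bytes live in LFS storage. A quick way to check that a downloaded shard matches its pointer (a sketch; the local path is hypothetical):

    # Sketch: verify a downloaded Parquet shard against its LFS pointer fields.
    import hashlib
    import os

    path = "Coarse_Grained/test-00000-of-00001.parquet"  # hypothetical local path

    # The oid is the SHA-256 of the file contents; size is its byte length.
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()

    assert digest == "b5e07f4849ff9ecd0c216844626fefd1c3c4afd1439bd0b16dcb914714f1f641"
    assert os.path.getsize(path) == 147029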
Coarse_Grained/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:974c71ca5261efae4761268b9e86fbd2dc97ab9ac2456efafefffa2a05257ac0
+size 525200
Coarse_Grained/validation-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:850a3d809bb192a6436046a055e5b6b8d62d215b2f4d1c3a38d0c6fda1100d58
+size 58491
Fine_Grained/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:202503e8552bfe140c83d8a8c4790cc8b2c95a89c6e7f11497586246e4586ea3
+size 147890
Fine_Grained/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00eadf726850f32bb4f338f2700387d6113e4dae764e0423cd9cc7aaf1d02b15
+size 525885
Fine_Grained/validation-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:00eadf726850f32bb4f338f2700387d6113e4dae764e0423cd9cc7aaf1d02b15
+size 525885
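The shards are ordinary Parquet files, so they can also be read outside the datasets library, e.g. with pandas (via pyarrow) after fetching one file from the Hub. A sketch, with the repo id assumed as above:

    # Sketch: fetch one shard from the Hub and read it with pandas/pyarrow.
    import pandas as pd
    from huggingface_hub import hf_hub_download

    local = hf_hub_download(
        repo_id="roman_urdu_hate_speech",  # assumed repo id
        filename="Fine_Grained/train-00000-of-00001.parquet",
        repo_type="dataset",
    )
    df = pd.read_parquet(local)  # columns: tweet, label
    print(df["label"].value_counts())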
README.md
CHANGED
@@ -33,16 +33,16 @@ dataset_info:
           '1': Normal
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 725715
     num_examples: 7208
   - name: test
-    num_bytes:
+    num_bytes: 202318
     num_examples: 2002
   - name: validation
-    num_bytes:
+    num_bytes: 79755
     num_examples: 800
-  download_size:
-  dataset_size:
+  download_size: 730720
+  dataset_size: 1007788
 - config_name: Fine_Grained
   features:
   - name: tweet
@@ -58,16 +58,34 @@ dataset_info:
           '4': Profane/Untargeted
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 723666
     num_examples: 7208
   - name: test
-    num_bytes:
+    num_bytes: 203590
     num_examples: 2002
   - name: validation
-    num_bytes:
+    num_bytes: 723666
     num_examples: 7208
-  download_size:
-  dataset_size:
+  download_size: 1199660
+  dataset_size: 1650922
+configs:
+- config_name: Coarse_Grained
+  data_files:
+  - split: train
+    path: Coarse_Grained/train-*
+  - split: test
+    path: Coarse_Grained/test-*
+  - split: validation
+    path: Coarse_Grained/validation-*
+  default: true
+- config_name: Fine_Grained
+  data_files:
+  - split: train
+    path: Fine_Grained/train-*
+  - split: test
+    path: Fine_Grained/test-*
+  - split: validation
+    path: Fine_Grained/validation-*
 ---
 
 # Dataset Card for roman_urdu_hate_speech
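The added configs: block is what lets load_dataset resolve each config name to its Parquet globs, while dataset_info still carries the class labels. For example, picking the non-default config and inspecting its label names (a sketch, repo id assumed as above):

    # Sketch: load the non-default config and inspect its ClassLabel names.
    from datasets import load_dataset

    fine = load_dataset("roman_urdu_hate_speech", "Fine_Grained", split="train")
    print(fine.features["label"].names)
    # ['Abusive/Offensive', 'Normal', 'Religious Hate', 'Sexism', 'Profane/Untargeted']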
roman_urdu_hate_speech.py
DELETED
@@ -1,210 +0,0 @@
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""roman_urdu_hate_speech dataset"""
-
-
-import csv
-
-import datasets
-from datasets.tasks import TextClassification
-
-
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """\
-@inproceedings{rizwan2020hate,
-  title={Hate-speech and offensive language detection in roman Urdu},
-  author={Rizwan, Hammad and Shakeel, Muhammad Haroon and Karim, Asim},
-  booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
-  pages={2512--2522},
-  year={2020}
-}
-"""
-
-# You can copy an official description
-_DESCRIPTION = """\
-The Roman Urdu Hate-Speech and Offensive Language Detection (RUHSOLD) dataset is a \
-Roman Urdu dataset of tweets annotated by experts in the relevant language. \
-The authors develop the gold-standard for two sub-tasks. \
-First sub-task is based on binary labels of Hate-Offensive content and Normal content (i.e., inoffensive language). \
-These labels are self-explanatory. \
-The authors refer to this sub-task as coarse-grained classification. \
-Second sub-task defines Hate-Offensive content with \
-four labels at a granular level. \
-These labels are the most relevant for the demographic of users who converse in RU and \
-are defined in related literature. The authors refer to this sub-task as fine-grained classification. \
-The objective behind creating two gold-standards is to enable the researchers to evaluate the hate speech detection \
-approaches on both easier (coarse-grained) and challenging (fine-grained) scenarios. \
-"""
-
-_HOMEPAGE = "https://github.com/haroonshakeel/roman_urdu_hate_speech"
-
-_LICENSE = "MIT License"
-
-_Download_URL = "https://raw.githubusercontent.com/haroonshakeel/roman_urdu_hate_speech/main/"
-
-# The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
-# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_URLS = {
-    "Coarse_Grained_train": _Download_URL + "task_1_train.tsv",
-    "Coarse_Grained_validation": _Download_URL + "task_1_validation.tsv",
-    "Coarse_Grained_test": _Download_URL + "task_1_test.tsv",
-    "Fine_Grained_train": _Download_URL + "task_2_train.tsv",
-    "Fine_Grained_validation": _Download_URL + "task_2_validation.tsv",
-    "Fine_Grained_test": _Download_URL + "task_2_test.tsv",
-}
-
-
-class RomanUrduHateSpeechConfig(datasets.BuilderConfig):
-    """BuilderConfig for RomanUrduHateSpeech Config"""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for RomanUrduHateSpeech Config.
-        Args:
-          **kwargs: keyword arguments forwarded to super.
-        """
-        super(RomanUrduHateSpeechConfig, self).__init__(**kwargs)
-
-
-class RomanUrduHateSpeech(datasets.GeneratorBasedBuilder):
-    """Roman Urdu Hate Speech dataset"""
-
-    VERSION = datasets.Version("1.1.0")
-
-    # This is an example of a dataset with multiple configurations.
-    # If you don't want/need to define several sub-sets in your dataset,
-    # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-    # If you need to make complex sub-parts in the datasets with configurable options
-    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-    # You will be able to load one or the other configurations in the following list with
-    # data = datasets.load_dataset('my_dataset', 'first_domain')
-    # data = datasets.load_dataset('my_dataset', 'second_domain')
-    BUILDER_CONFIGS = [
-        RomanUrduHateSpeechConfig(
-            name="Coarse_Grained",
-            version=VERSION,
-            description="This part of my dataset covers the Coarse Grained dataset",
-        ),
-        RomanUrduHateSpeechConfig(
-            name="Fine_Grained", version=VERSION, description="This part of my dataset covers the Fine Grained dataset"
-        ),
-    ]
-
-    DEFAULT_CONFIG_NAME = "Coarse_Grained"
-    # It's not mandatory to have a default configuration. Just use one if it makes sense.
-
-    def _info(self):
-
-        if self.config.name == "Coarse_Grained":
-            features = datasets.Features(
-                {
-                    "tweet": datasets.Value("string"),
-                    "label": datasets.features.ClassLabel(names=["Abusive/Offensive", "Normal"]),
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
-        if self.config.name == "Fine_Grained":
-            features = datasets.Features(
-                {
-                    "tweet": datasets.Value("string"),
-                    "label": datasets.features.ClassLabel(
-                        names=["Abusive/Offensive", "Normal", "Religious Hate", "Sexism", "Profane/Untargeted"]
-                    ),
-                    # These are the features of your dataset like images, labels ...
-                }
-            )
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features,  # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-            # supervised_keys=("sentence", "label"),
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
-            # License for the dataset if available
-            license=_LICENSE,
-            # Citation for the dataset
-            citation=_CITATION,
-            task_templates=[TextClassification(text_column="tweet", label_column="label")],
-        )
-
-    def _split_generators(self, dl_manager):
-
-        urls_train = _URLS[self.config.name + "_train"]
-
-        urls_validate = _URLS[self.config.name + "_validation"]
-
-        urls_test = _URLS[self.config.name + "_test"]
-
-        data_dir_train = dl_manager.download_and_extract(urls_train)
-
-        data_dir_validate = dl_manager.download_and_extract(urls_validate)
-
-        data_dir_test = dl_manager.download_and_extract(urls_test)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": data_dir_train,
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": data_dir_test,
-                    "split": "test",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": data_dir_validate,
-                    "split": "dev",
-                },
-            ),
-        ]
-
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, split):
-
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        with open(filepath, encoding="utf-8") as tsv_file:
-            tsv_reader = csv.reader(tsv_file, quotechar="|", delimiter="\t", quoting=csv.QUOTE_ALL)
-            for key, row in enumerate(tsv_reader):
-                if key == 0:
-                    continue
-                if self.config.name == "Coarse_Grained":
-                    tweet, label = row
-                    label = int(label)
-                    yield key, {
-                        "tweet": tweet,
-                        "label": None if split == "test" else label,
-                    }
-                if self.config.name == "Fine_Grained":
-                    tweet, label = row
-                    label = int(label)
-                    yield key, {
-                        "tweet": tweet,
-                        "label": None if split == "test" else label,
-                    }
-        # Yields examples as (key, example) tuples
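For reference, the deleted script's _generate_examples boils down to a single TSV parse; anyone still working from the original GitHub TSVs can reproduce it standalone. A sketch, using the same csv settings as the deleted script:

    # Sketch: standalone version of the deleted script's TSV parsing.
    import csv

    def read_ruhsold_tsv(filepath, split):
        """Yield (key, {"tweet", "label"}) pairs like the old _generate_examples."""
        with open(filepath, encoding="utf-8") as tsv_file:
            reader = csv.reader(tsv_file, quotechar="|", delimiter="\t", quoting=csv.QUOTE_ALL)
            for key, row in enumerate(reader):
                if key == 0:  # skip the header row
                    continue
                tweet, label = row
                # The old script blanked labels on the test split.
                yield key, {"tweet": tweet, "label": None if split == "test" else int(label)}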