holylovenia committed
Commit: 0b61bfb
1 Parent(s): 9535a33

Upload local_id_abusive.py with huggingface_hub
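A minimal sketch of such an upload via the huggingface_hub client (the repo_id below is a placeholder for illustration, not taken from this page):

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="local_id_abusive.py",  # local path to the script
    path_in_repo="local_id_abusive.py",
    repo_id="<user>/local_id_abusive",  # placeholder repo id
    repo_type="dataset",
)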
local_id_abusive.py ADDED (+160 -0)
@@ -0,0 +1,160 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from nusacrowd.utils import schemas
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import Tasks

_CITATION = """\
@inproceedings{putri2021abusive,
  title={Abusive language and hate speech detection for Javanese and Sundanese languages in tweets: Dataset and preliminary study},
  author={Putri, Shofianina Dwi Ananda and Ibrohim, Muhammad Okky and Budi, Indra},
  booktitle={2021 11th International Workshop on Computer Science and Engineering, WCSE 2021},
  pages={461--465},
  year={2021},
  organization={International Workshop on Computer Science and Engineering (WCSE)},
  abstract={Indonesia’s demography as an archipelago with lots of tribes and local languages added variances in their communication style. Every region in Indonesia has its own distinct culture, accents, and languages. The demographical condition can influence the characteristic of the language used in social media, such as Twitter. It can be found that Indonesian uses their own local language for communicating and expressing their mind in tweets. Nowadays, research about identifying hate speech and abusive language has become an attractive and developing topic. Moreover, the research related to Indonesian local languages still rarely encountered. This paper analyzes the use of machine learning approaches such as Naïve Bayes (NB), Support Vector Machine (SVM), and Random Forest Decision Tree (RFDT) in detecting hate speech and abusive language in Sundanese and Javanese as Indonesian local languages. The classifiers were used with the several term weightings features, such as word n-grams and char n-grams. The experiments are evaluated using the F-measure. It achieves over 60 % for both local languages.}
}
"""

_DATASETNAME = "local_id_abusive"

_DESCRIPTION = """\
This dataset is for abusive and hate speech detection, using Twitter text containing Javanese and Sundanese words.

(from the publication source)
The Indonesian local language dataset collection was conducted using Twitter search API to collect the tweets and then
implemented using Tweepy Library. The tweets were collected using queries from the list of abusive words in Indonesian
tweets. The abusive words were translated into local Indonesian languages, which are Javanese and Sundanese. The
translated words are then used as queries to collect tweets containing Indonesian and local languages. The translation
process involved native speakers for each local language. The crawling process has collected a total of more than 5000
tweets. Then, the crawled data were filtered to get tweets that contain local’s vocabulary and/or sentences in Javanese
and Sundanese. Next, after the filtering process, the data will be labeled whether the tweets are labeled as hate speech
and abusive language or not.
"""

_HOMEPAGE = "https://github.com/Shofianina/local-indonesian-abusive-hate-speech-dataset"

_LICENSE = "Unknown"

_LANGUAGES = ["jav", "sun"]
_LOCAL = False

_URLS = {
    _DATASETNAME: {
        "jav": "https://raw.githubusercontent.com/Shofianina/local-indonesian-abusive-hate-speech-dataset/main/Javanese.csv",
        "sun": "https://raw.githubusercontent.com/Shofianina/local-indonesian-abusive-hate-speech-dataset/main/Sundanese.csv",
    }
}

_SUPPORTED_TASKS = [Tasks.ASPECT_BASED_SENTIMENT_ANALYSIS]

_SOURCE_VERSION = "1.0.0"
_NUSANTARA_VERSION = "1.0.0"


class LocalIDAbusive(datasets.GeneratorBasedBuilder):
    """Local ID Abusive is a dataset for abusive and hate speech detection, using Twitter text containing Javanese and
    Sundanese words."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)

    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="local_id_abusive_jav_source",
            version=SOURCE_VERSION,
            description="local_id_abusive source schema Javanese",
            schema="source",
            subset_id="local_id_abusive_jav",
        ),
        NusantaraConfig(
            name="local_id_abusive_sun_source",
            version=SOURCE_VERSION,
            description="local_id_abusive source schema Sundanese",
            schema="source",
            subset_id="local_id_abusive_sun",
        ),
        NusantaraConfig(
            name="local_id_abusive_jav_nusantara_text_multi",
            version=NUSANTARA_VERSION,
            description="local_id_abusive Nusantara schema Javanese",
            schema="nusantara_text_multi",
            subset_id="local_id_abusive_jav",
        ),
        NusantaraConfig(
            name="local_id_abusive_sun_nusantara_text_multi",
            version=NUSANTARA_VERSION,
            description="local_id_abusive Nusantara schema Sundanese",
            schema="nusantara_text_multi",
            subset_id="local_id_abusive_sun",
        ),
    ]

    DEFAULT_CONFIG_NAME = "local_id_abusive_jav_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "isi_tweet": datasets.Value("string"),
                    "uk": datasets.Value("bool"),
                    "hs": datasets.Value("bool"),
                }
            )
        elif self.config.schema == "nusantara_text_multi":
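            # Multi-label schema: the two labels (uk, hs) each take class values 0 or 1.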
            features = schemas.text_multi_features([0, 1])

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
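        # Config names look like "local_id_abusive_<lang>_<schema>", so token 3 is the language code ("jav" or "sun").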
        lang = self.config.name.split("_")[3]
        urls = _URLS[_DATASETNAME][lang]
        data_dir = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # Whatever you put in gen_kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        df = pd.read_csv(filepath, sep=",", encoding="ISO-8859-1").reset_index()
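        # CSV columns: isi_tweet (tweet text), uk (abusive language flag), hs (hate speech flag).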
        for i, row in enumerate(df.itertuples()):
            if self.config.schema == "source":
                example = {"isi_tweet": row.isi_tweet, "uk": row.uk, "hs": row.hs}
                yield i, example
            elif self.config.schema == "nusantara_text_multi":
                example = {
                    "id": str(i),
                    "text": row.isi_tweet,
                    "labels": [row.uk, row.hs],
                }
                yield i, example
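For reference, a minimal sketch of loading one of the configs defined above with the datasets library, assuming a local copy of this script:

import datasets

# Javanese subset in the source schema; the other config names are listed in BUILDER_CONFIGS.
dset = datasets.load_dataset("local_id_abusive.py", name="local_id_abusive_jav_source")
print(dset["train"][0])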