holylovenia committed
Commit abdffe7
Parent: 8d69d93

Upload id_sentiment_analysis.py with huggingface_hub

Files changed (1)
  1. id_sentiment_analysis.py +162 -0
id_sentiment_analysis.py ADDED
@@ -0,0 +1,162 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from typing import Dict, List, Tuple
+
+ import datasets
+ import pandas as pd
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import TASK_TO_SCHEMA, Licenses, Tasks
+
+ _CITATION = """\
+ @misc{ridife2019idsa,
+     author = {Fe, Ridi},
+     title = {Indonesia Sentiment Analysis Dataset},
+     year = {2019},
+     publisher = {GitHub},
+     journal = {GitHub repository},
+     howpublished = {\\url{https://github.com/ridife/dataset-idsa}}
+ }
+ """
+
+ _DATASETNAME = "id_sentiment_analysis"
+
+ _DESCRIPTION = """\
+ This dataset consists of 10806 labeled Indonesian tweets, collected up to 2019, each annotated with a sentiment label: positive, negative, or neutral.
+ The dataset was developed by the Cloud Experience Research Group at Gadjah Mada University.
+ There is no further documentation of the dataset. The contributor found this dataset after skimming through "Sentiment analysis of Indonesian datasets based on a hybrid deep-learning strategy" (Lin CH and Nuha U, 2023).
+ """
+
+ _HOMEPAGE = "https://ridi.staff.ugm.ac.id/2019/03/06/indonesia-sentiment-analysis-dataset/"
+
+ _LANGUAGES = ["ind"]  # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
+
+ _LICENSE = Licenses.UNKNOWN.value
+
+ _LOCAL = False
+
+ _URLS = {
+     _DATASETNAME: "https://raw.githubusercontent.com/ridife/dataset-idsa/master/Indonesian%20Sentiment%20Twitter%20Dataset%20Labeled.csv",
+ }
+
+ _SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]
+ _SUPPORTED_SCHEMA_STRINGS = [f"seacrowd_{str(TASK_TO_SCHEMA[task]).lower()}" for task in _SUPPORTED_TASKS]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class IdSentimentAnalysis(datasets.GeneratorBasedBuilder):
+     """This dataset consists of 10806 labeled Indonesian tweets, collected up to 2019, each annotated with a sentiment label: positive, negative, or neutral."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_source",
+             version=SOURCE_VERSION,
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}",
+         ),
+     ]
+
+     seacrowd_schema_config: List[SEACrowdConfig] = []
+
+     for seacrowd_schema in _SUPPORTED_SCHEMA_STRINGS:
+
+         seacrowd_schema_config.append(
+             SEACrowdConfig(
+                 name=f"{_DATASETNAME}_{seacrowd_schema}",
+                 version=SEACROWD_VERSION,
+                 description=f"{_DATASETNAME} {seacrowd_schema} schema",
+                 schema=f"{seacrowd_schema}",
+                 subset_id=f"{_DATASETNAME}",
+             )
+         )
+
+     BUILDER_CONFIGS.extend(seacrowd_schema_config)
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "sentimen": datasets.Value("int32"),
+                     "tweet": datasets.Value("string"),
+                 }
+             )
+
+         elif self.config.schema == f"seacrowd_{str(TASK_TO_SCHEMA[Tasks.SENTIMENT_ANALYSIS]).lower()}":
+             features = schemas.text_features(label_names=[1, -1, 0])
+
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         path = dl_manager.download_and_extract(_URLS[_DATASETNAME])
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "path": path,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, path: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+
+         idx = 0
+
+         if self.config.schema == "source":
+             # The downloaded file is tab-separated despite its .csv extension.
+             df = pd.read_csv(path, delimiter="\t")
+
+             df.rename(columns={"Tweet": "tweet"}, inplace=True)
+
+             for _, row in df.iterrows():
+                 yield idx, row.to_dict()
+                 idx += 1
+
+         elif self.config.schema == f"seacrowd_{str(TASK_TO_SCHEMA[Tasks.SENTIMENT_ANALYSIS]).lower()}":
+             df = pd.read_csv(path, delimiter="\t")
+
+             # Map the source columns onto the seacrowd text schema fields: id, text, label.
+             df["id"] = df.index
+             df.rename(columns={"sentimen": "label"}, inplace=True)
+             df.rename(columns={"Tweet": "text"}, inplace=True)
+
+             for _, row in df.iterrows():
+                 yield idx, row.to_dict()
+                 idx += 1
+
+         else:
+             raise ValueError(f"Invalid config: {self.config.name}")
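
For context, a minimal sketch of how the uploaded script could be exercised locally with the `datasets` library. This is an assumption-laden example, not part of the commit: it assumes the file is saved as `id_sentiment_analysis.py`, that the `seacrowd` utilities it imports are installed, and that the installed `datasets` release still supports script-based loaders (recent 2.x versions require `trust_remote_code=True`). The seacrowd config suffix comes from `TASK_TO_SCHEMA`, so verify the name against `BUILDER_CONFIGS`.

from datasets import load_dataset

# Source schema: raw columns "sentimen" (int) and "tweet" (string).
ds = load_dataset(
    "id_sentiment_analysis.py",           # local path to the script shown above (assumed)
    name="id_sentiment_analysis_source",  # DEFAULT_CONFIG_NAME in the script
    trust_remote_code=True,               # needed by recent datasets releases for loading scripts
)
print(ds["train"][0])

# The seacrowd config is registered as f"id_sentiment_analysis_{seacrowd_schema}",
# where seacrowd_schema is derived from TASK_TO_SCHEMA[Tasks.SENTIMENT_ANALYSIS];
# "seacrowd_text" is a plausible value, but check BUILDER_CONFIGS before relying on it.
# ds_text = load_dataset("id_sentiment_analysis.py", name="id_sentiment_analysis_seacrowd_text", trust_remote_code=True)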