🐉 usable dataset
- data/data.jsonl.gz +0 -3
- many_emotions.py +32 -117
- requirements.txt +1 -2
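In short: the multi-source builder (go_emotions, daily_dialog, emotion) is replaced by two configs, "raw" (one split per language) and "split" (train/validation/test), both read from pre-packaged JSONL files shipped with the repo. The pandas dependency and the bundled data/data.jsonl.gz LFS file are dropped.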
data/data.jsonl.gz
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8944e6b35cb42294769ac30cf17bd006231545b2eeecfa59324246e192564d1f
-size 15388281
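(The three deleted lines are the Git LFS pointer for the roughly 15 MB data/data.jsonl.gz payload; the data now ships as data/many_emotions.json.gz and the per-split data/split_dataset_*.jsonl.gz files referenced in the loader below.)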
many_emotions.py
CHANGED
@@ -1,29 +1,10 @@
 import json
-import os
-import zipfile
 from typing import List
 
 import datasets
-import pandas as pd
 from datasets import ClassLabel, Value, load_dataset
 
 _URLS = {
-    "go_emotions": {
-        "urls": [
-            "https://storage.googleapis.com/gresearch/goemotions/data/full_dataset/goemotions_1.csv",
-            "https://storage.googleapis.com/gresearch/goemotions/data/full_dataset/goemotions_2.csv",
-            "https://storage.googleapis.com/gresearch/goemotions/data/full_dataset/goemotions_3.csv",
-        ],
-        "license": "apache license 2.0"
-    },
-    "daily_dialog": {
-        "urls": ["http://yanran.li/files/ijcnlp_dailydialog.zip"],
-        "license": "CC BY-NC-SA 4.0"
-    },
-    "emotion": {
-        "data": ["data/data.jsonl.gz"],
-        "license": "educational/research"
-    }
 }
 
 _SUB_CLASSES = [
@@ -81,124 +62,52 @@ class EmotionsDatasetConfig(datasets.BuilderConfig):
 class EmotionsDataset(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
         EmotionsDatasetConfig(
-            name="all",
-            label_classes=_CLASS_NAMES,
-            features=["text", "label", "dataset", "license"]
+            name="raw",
+            label_classes=_SUB_CLASSES,
+            features=["text", "label", "dataset", "license", "language"]
         ),
         EmotionsDatasetConfig(
-            name="
+            name="split",
             label_classes=_SUB_CLASSES,
-            features=["text", "label", "dataset", "license"]
+            features=["text", "label", "dataset", "license", "language"]
         )
     ]
 
     DEFAULT_CONFIG_NAME = "all"
 
     def _info(self):
-        if self.config.name == "all":
-            return datasets.DatasetInfo(
-                features=datasets.Features(
-                    {
-                        "id": datasets.Value("string"),
-                        'text': Value(dtype='string', id=None),
-                        'label': ClassLabel(names=_CLASS_NAMES, id=None),
-                        'dataset': Value(dtype='string', id=None),
-                        'license': Value(dtype='string', id=None)
-                    }
-                )
-            )
-        else:
-            return datasets.DatasetInfo(
-                features=datasets.Features(
-                    {
-                        "id": datasets.Value("string"),
-                        'text': Value(dtype='string', id=None),
-                        'label': ClassLabel(names=_SUB_CLASSES, id=None),
-                        'dataset': Value(dtype='string', id=None),
-                        'license': Value(dtype='string', id=None)
-                    }
-                )
-            )
+        return datasets.DatasetInfo(
+            features=datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    'text': Value(dtype='string', id=None),
+                    'label': ClassLabel(names=_SUB_CLASSES, id=None),
+                    'dataset': Value(dtype='string', id=None),
+                    'license': Value(dtype='string', id=None),
+                    'language': Value(dtype='string', id=None)
+                }
+            )
+        )
 
     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
         splits = []
-        if self.config.name == "all":
-            for k, v in _URLS.items():
-                downloaded_files = dl_manager.download_and_extract(v.get("urls", v.get("data")))
-                splits.append(datasets.SplitGenerator(name=k,
-                                                      gen_kwargs={"filepaths": downloaded_files,
-                                                                  "dataset": k,
-                                                                  "license": v.get("license")}))
-        else:
+        if self.config.name == "raw":
            downloaded_files = dl_manager.download_and_extract(["data/many_emotions.json.gz"])
            for lang in ["en", "fr", "it", "es", "de"]:
                splits.append(datasets.SplitGenerator(name=lang,
                                                      gen_kwargs={"filepaths": downloaded_files,
                                                                  "language": lang,
-                                                                 "dataset": "many_emotions"}))
+                                                                 "dataset": "raw"}))
+        else:
+            for split in ["train", "validation", "test"]:
+                downloaded_files = dl_manager.download_and_extract([f"data/split_dataset_{split}.jsonl.gz"])
+                splits.append(datasets.SplitGenerator(name=split,
+                                                      gen_kwargs={"filepaths": downloaded_files,
+                                                                  "dataset": "split"}))
        return splits
 
-    def process_daily_dialog(self, filepaths, dataset):
-        # TODO move outside
-        emo_mapping = {0: "no emotion", 1: "anger", 2: "disgust",
-                       3: "fear", 4: "happiness", 5: "sadness", 6: "surprise"}
-        for i, filepath in enumerate(filepaths):
-            if os.path.isdir(filepath):
-                emotions = open(os.path.join(filepath, "ijcnlp_dailydialog/dialogues_emotion.txt"), "r").read()
-                text = open(os.path.join(filepath, "ijcnlp_dailydialog/dialogues_text.txt"), "r").read()
-            else:
-                # TODO check if this can be removed
-                archive = zipfile.ZipFile(filepath, 'r')
-                emotions = archive.open("ijcnlp_dailydialog/dialogues_emotion.txt", "r").read().decode()
-                text = archive.open("ijcnlp_dailydialog/dialogues_text.txt", "r").read().decode()
-        emotions = emotions.split("\n")
-        text = text.split("\n")
-
-        for idx_out, (e, t) in enumerate(zip(emotions, text)):
-            if len(t.strip()) > 0:
-                cast_emotions = [int(j) for j in e.strip().split(" ")]
-                cast_dialog = [d.strip() for d in t.split("__eou__") if len(d)]
-                for idx_in, (ce, ct) in enumerate(zip(cast_emotions, cast_dialog)):
-                    uid = f"daily_dialog_{i}_{idx_out}_{idx_in}"
-                    yield uid, {"text": ct,
-                                "id": uid,
-                                "dataset": dataset,
-                                "license": license,
-                                "label": emo_mapping[ce]}
-
     def _generate_examples(self, filepaths, dataset, license=None, language=None):
-        if dataset == "go_emotions":
-            for i, filepath in enumerate(filepaths):
-                df = pd.read_csv(filepath)
-                current_classes = list(set(df.columns).intersection(set(_CLASS_NAMES)))
-                df = df[["text"] + current_classes]
-                df = df[df[current_classes].sum(axis=1) == 1].reset_index(drop=True)
-                for row_idx, row in df.iterrows():
-                    uid = f"go_emotions_{i}_{row_idx}"
-                    yield uid, {"text": row["text"],
-                                "id": uid,
-                                "dataset": dataset,
-                                "license": license,
-                                "label": row[current_classes][row == 1].index.item()}
-        elif dataset == "daily_dialog":
-            for d in self.process_daily_dialog(filepaths, dataset):
-                yield d
-        elif dataset == "emotion":
-            emo_mapping = {0: "sadness", 1: "joy", 2: "love",
-                           3: "anger", 4: "fear", 5: "surprise"}
-            for i, filepath in enumerate(filepaths):
-                with open(filepath, encoding="utf-8") as f:
-                    for idx, line in enumerate(f):
-                        uid = f"{dataset}_{idx}"
-                        example = json.loads(line)
-                        example.update({
-                            "id": uid,
-                            "dataset": dataset,
-                            "license": license,
-                            "label": emo_mapping[example["label"]]
-                        })
-                        yield uid, example
-        elif dataset == "many_emotions":
+        if dataset == "raw":
            for i, filepath in enumerate(filepaths):
                with open(filepath, encoding="utf-8") as f:
                    for idx, line in enumerate(f):
@@ -220,8 +129,14 @@ class EmotionsDataset(datasets.GeneratorBasedBuilder):
                        "label": label
                    })
                    yield example["id"], example
+        else:
+            for i, filepath in enumerate(filepaths):
+                with open(filepath, encoding="utf-8") as f:
+                    for idx, line in enumerate(f):
+                        example = json.loads(line)
+                        yield example["id"], example
 
 
 if __name__ == "__main__":
-    dataset = load_dataset("ma2za/many_emotions", name="
+    dataset = load_dataset("ma2za/many_emotions", name="raw")
     print()
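For reference, a minimal usage sketch of the two configs this commit defines (a hypothetical smoke test, assuming the data files above are present in the ma2za/many_emotions Hub repo):

    from datasets import load_dataset

    # "raw" config: one split per language, as declared in _split_generators
    raw = load_dataset("ma2za/many_emotions", name="raw")
    print(raw["en"][0])  # keys: id, text, label, dataset, license, language

    # "split" config: conventional train/validation/test splits
    split = load_dataset("ma2za/many_emotions", name="split")
    print(split["train"].features["label"].names)  # the _SUB_CLASSES label set

    # note: recent datasets releases may additionally require
    # trust_remote_code=True for script-based datasets like this one

Note that DEFAULT_CONFIG_NAME is still "all", which no longer matches either config name, so a load_dataset call that omits name= will likely fail until that default is updated.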
requirements.txt
CHANGED
@@ -1,2 +1 @@
-datasets
-pandas
+datasets
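pandas drops out because its only consumer was the pd.read_csv path for go_emotions, which this commit deletes; datasets is now the sole requirement.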