added sentiment data
data/tweet_sentiment/test.jsonl
ADDED
The diff for this file is too large to render.
data/tweet_sentiment/train.jsonl
ADDED
The diff for this file is too large to render.
data/tweet_sentiment/validation.jsonl
ADDED
The diff for this file is too large to render.
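The JSONL splits above are not rendered in the diff. Judging from the columns written by process/tweet_sentiment.py below, each line should be one JSON record with gold_label, topic and text fields. A minimal sketch for peeking at one record; the path and key set are assumptions based on that script, not taken from the rendered data:

import json

# Hypothetical peek at the first record of a split; keys are expected to be
# 'gold_label', 'topic' and 'text', as written by process/tweet_sentiment.py.
with open('data/tweet_sentiment/train.jsonl') as f:
    first_record = json.loads(f.readline())
print(sorted(first_record.keys()))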
process/tweet_sentiment.py
ADDED
@@ -0,0 +1,70 @@
+# Original data: https://alt.qcri.org/semeval2017/task4/index.php?id=results
+import pandas as pd
+from glob import glob
+import urllib.request
+
+
+# format text
+def clean_text(text):
+    text = text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
+
+    new_text = []
+    for t in text.split():
+        # MAKE SURE to check lowercase
+        t = '@user' if t.startswith('@') and len(t) > 1 and t.replace(
+            '@', '').lower() not in verified_users else t
+        t = '{URL}' if t.startswith('http') else t
+        new_text.append(t)
+
+    return ' '.join(new_text)
+
+
+# test set
+with open('./SemEval2017-task4-test.subtask-CE.english.txt') as f:
+    test_lines = f.readlines()
+test = [x.split('\t') for x in test_lines]
+test = pd.DataFrame(test, columns=['id', 'topic', 'gold_label', 'text'])
+
+# validation set
+fnames = ['twitter-2016dev-CE.txt', 'twitter-2016devtest-CE.txt']
+
+validation_lines = []
+for input_f in fnames:
+    with open(input_f) as f:
+        lines = f.readlines()
+    validation_lines.extend(lines)
+
+validation = [x.split('\t') for x in validation_lines]
+validation = pd.DataFrame(
+    validation, columns=['id', 'topic', 'gold_label', 'text'])
+
+# train set
+fnames = ['./twitter-2016train-CE.txt', './twitter-2016test-CE.txt']
+
+train_lines = []
+for input_f in fnames:
+    with open(input_f) as f:
+        lines = f.readlines()
+    train_lines.extend(lines)
+
+train = [x.split('\t') for x in train_lines]
+train = pd.DataFrame(
+    train, columns=['id', 'topic', 'gold_label', 'text'])
+
+# clean text
+verified_users = urllib.request.urlopen(
+    'https://raw.githubusercontent.com/cardiffnlp/timelms/main/data/verified_users.v091122.txt').readlines()
+verified_users = [x.decode().strip('\n').lower() for x in verified_users]
+
+train['text'] = train['text'].apply(clean_text)
+validation['text'] = validation['text'].apply(clean_text)
+test['text'] = test['text'].apply(clean_text)
+
+# save splits
+cols_to_keep = ['gold_label', 'topic', 'text']
+train[cols_to_keep].to_json(
+    '../data/tweet_sentiment/train.jsonl', lines=True, orient='records')
+validation[cols_to_keep].to_json(
+    '../data/tweet_sentiment/validation.jsonl', lines=True, orient='records')
+test[cols_to_keep].to_json(
+    '../data/tweet_sentiment/test.jsonl', lines=True, orient='records')
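As a quick sanity check of the script above, the generated splits can be read back with pandas. This is a sketch only; it assumes the script was run from the process/ directory so that the relative ../data/tweet_sentiment/ paths it writes to resolve:

import pandas as pd

# Re-read the splits written by process/tweet_sentiment.py and report their
# shapes and columns (expected columns: gold_label, topic, text).
for split in ['train', 'validation', 'test']:
    df = pd.read_json(f'../data/tweet_sentiment/{split}.jsonl', lines=True)
    print(split, df.shape, list(df.columns))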
super_tweet_eval.py
CHANGED
@@ -133,6 +133,24 @@ _TWEET_DISAMBIGUATION_CITATION = """\
 """
 _TWEET_EMOJI_DESCRIPTION = """TBA"""
 _TWEET_EMOJI_CITATION = """TBA"""
+_TWEET_SENTIMENT_DESCRIPTION = """TBA"""
+_TWEET_SENTIMENT_CITATION = """\
+@inproceedings{rosenthal-etal-2017-semeval,
+    title = "{S}em{E}val-2017 Task 4: Sentiment Analysis in {T}witter",
+    author = "Rosenthal, Sara  and
+      Farra, Noura  and
+      Nakov, Preslav",
+    booktitle = "Proceedings of the 11th International Workshop on Semantic Evaluation ({S}em{E}val-2017)",
+    month = aug,
+    year = "2017",
+    address = "Vancouver, Canada",
+    publisher = "Association for Computational Linguistics",
+    url = "https://aclanthology.org/S17-2088",
+    doi = "10.18653/v1/S17-2088",
+    pages = "502--518",
+    abstract = "This paper describes the fifth year of the Sentiment Analysis in Twitter task. SemEval-2017 Task 4 continues with a rerun of the subtasks of SemEval-2016 Task 4, which include identifying the overall sentiment of the tweet, sentiment towards a topic with classification on a two-point and on a five-point ordinal scale, and quantification of the distribution of sentiment towards a topic across a number of tweets: again on a two-point and on a five-point ordinal scale. Compared to 2016, we made two changes: (i) we introduced a new language, Arabic, for all subtasks, and (ii) we made available information from the profiles of the Twitter users who posted the target tweets. The task continues to be very popular, with a total of 48 teams participating this year.",
+}
+"""


 class SuperTweetEvalConfig(datasets.BuilderConfig):

@@ -227,6 +245,13 @@ class SuperTweetEval(datasets.GeneratorBasedBuilder):
             citation=_TWEET_EMOJI_CITATION,
             features=['gold_label', 'text'],
             data_url="https://huggingface.co/datasets/cardiffnlp/super_tweet_eval/resolve/main/data/tweet_emoji",
+        ),
+        SuperTweetEvalConfig(
+            name="tweet_sentiment",
+            description=_TWEET_SENTIMENT_DESCRIPTION,
+            citation=_TWEET_SENTIMENT_CITATION,
+            features=['gold_label', 'topic', 'text'],
+            data_url="https://huggingface.co/datasets/cardiffnlp/super_tweet_eval/resolve/main/data/tweet_sentiment",
         )
     ]


@@ -270,6 +295,11 @@ class SuperTweetEval(datasets.GeneratorBasedBuilder):
         if self.config.name == "tweet_emoji":
             features["gold_label"] = datasets.Value("int32")
             features["text"] = datasets.Value("string")
+        if self.config.name == "tweet_sentiment":
+            features["gold_label"] = datasets.Value("int32")
+            features["text"] = datasets.Value("string")
+            features["topic"] = datasets.Value("string")
+

         return datasets.DatasetInfo(
             description=_SUPER_TWEET_EVAL_DESCRIPTION + "\n" + self.config.description,
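With the tweet_sentiment config registered in super_tweet_eval.py, the new split should become loadable through the datasets library. A minimal usage sketch; the repository id is taken from the data_url above, and it assumes this commit has been pushed to the Hub:

from datasets import load_dataset

# Load the config added in this commit; per the _info() changes above,
# gold_label is cast to int32 while topic and text are strings.
data = load_dataset("cardiffnlp/super_tweet_eval", "tweet_sentiment")
print(data["train"][0])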