init
- data/tempo_wic/test.jsonl +0 -0
- data/tempo_wic/train.jsonl +0 -0
- data/tempo_wic/validation.jsonl +0 -0
- process/tempo_wic.py +36 -0
- super_tweet_eval.py +36 -0
data/tempo_wic/test.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
data/tempo_wic/train.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
data/tempo_wic/validation.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
process/tempo_wic.py
ADDED
@@ -0,0 +1,36 @@
+import json
+import os
+import pandas as pd
+
+os.makedirs("data/tempo_wic", exist_ok=True)
+
+for s in ['train', 'validation', 'test']:
+    # Test instances and gold labels ship under different file names than train/validation.
+    if s == 'test':
+        with open("misc/TempoWiC/data/test-codalab-10k.data.jl") as f:
+            data = pd.DataFrame([json.loads(i) for i in f.read().split("\n") if len(i) > 0])
+        df = pd.read_csv("misc/TempoWiC/data/test.gold.tsv", sep="\t")
+    else:
+        with open(f"misc/TempoWiC/data/{s}.data.jl") as f:
+            data = pd.DataFrame([json.loads(i) for i in f.read().split("\n") if len(i) > 0])
+        df = pd.read_csv(f"misc/TempoWiC/data/{s}.labels.tsv", sep="\t")
+    # Keep only instances that have a gold label, then attach it.
+    df.columns = ["id", "label"]
+    df.index = df.pop("id")
+    data = data[[i in df.index for i in data['id']]]
+    data['label'] = [df.loc[i].values[0] for i in data['id']]
+    assert len(df) == len(data)
+
+    # Flatten the nested tweet1/tweet2 dictionaries into suffixed top-level keys.
+    data_jl = []
+    for _, i in data.iterrows():
+        i = i.to_dict()
+        tmp = {"id": i['id'], "word": i["word"], "label_binary": i["label"]}
+        tmp.update({f"{k}_1": v for k, v in i['tweet1'].items()})
+        tmp.update({f"{k}_2": v for k, v in i['tweet2'].items()})
+        # Rename token lists to match the feature names declared in super_tweet_eval.py.
+        tmp['text_tokenized_1'] = tmp.pop('tokens_1')
+        tmp['text_tokenized_2'] = tmp.pop('tokens_2')
+        data_jl.append(tmp)
+    with open(f"data/tempo_wic/{s}.jsonl", "w") as f:
+        f.write("\n".join([json.dumps(i) for i in data_jl]))
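A quick way to sanity-check the output is to reload one of the generated files; a minimal sketch, assuming the script above has been run from the repository root:

import json

# Read the first record of the generated validation split.
with open("data/tempo_wic/validation.jsonl") as f:
    record = json.loads(f.readline())

# Each record carries id / word / label_binary plus the flattened tweet fields
# suffixed with _1 and _2 (text, text_tokenized, token_idx, date, ...).
print(sorted(record.keys()))
assert {"id", "word", "label_binary", "text_1", "text_2"} <= set(record)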
super_tweet_eval.py
CHANGED
@@ -79,6 +79,29 @@ _TWEET_INTIMACY_CITATION = """\
 """
 _TWEET_SIMILARITY_DESCRIPTION = """TBA"""
 _TWEET_SIMILARITY_CITATION = """TBA"""
+_TEMPO_WIC_DESCRIPTION = """TBA"""
+_TEMPO_WIC_CITATION = """\
+@inproceedings{loureiro-etal-2022-tempowic,
+    title = "{T}empo{W}i{C}: An Evaluation Benchmark for Detecting Meaning Shift in Social Media",
+    author = "Loureiro, Daniel and
+      D{'}Souza, Aminette and
+      Muhajab, Areej Nasser and
+      White, Isabella A. and
+      Wong, Gabriel and
+      Espinosa-Anke, Luis and
+      Neves, Leonardo and
+      Barbieri, Francesco and
+      Camacho-Collados, Jose",
+    booktitle = "Proceedings of the 29th International Conference on Computational Linguistics",
+    month = oct,
+    year = "2022",
+    address = "Gyeongju, Republic of Korea",
+    publisher = "International Committee on Computational Linguistics",
+    url = "https://aclanthology.org/2022.coling-1.296",
+    pages = "3353--3359",
+    abstract = "Language evolves over time, and word meaning changes accordingly. This is especially true in social media, since its dynamic nature leads to faster semantic shifts, making it challenging for NLP models to deal with new content and trends. However, the number of datasets and models that specifically address the dynamic nature of these social platforms is scarce. To bridge this gap, we present TempoWiC, a new benchmark especially aimed at accelerating research in social media-based meaning shift. Our results show that TempoWiC is a challenging benchmark, even for recently-released language models specialized in social media.",
+}
+"""
 
 
 class SuperTweetEvalConfig(datasets.BuilderConfig):
@@ -143,6 +166,15 @@ class SuperTweetEval(datasets.GeneratorBasedBuilder):
             citation=_TWEET_SIMILARITY_CITATION,
             features=["text_1", "text_2", "label_float"],
             data_url="https://huggingface.co/datasets/cardiffnlp/super_tweet_eval/resolve/main/data/tweet_similarity",
+        ),
+        SuperTweetEvalConfig(
+            name="tempo_wic",
+            description=_TEMPO_WIC_DESCRIPTION,
+            citation=_TEMPO_WIC_CITATION,
+            features=['label_binary', 'id', 'word',
+                      'text_1', 'text_tokenized_1', 'token_idx_1', 'text_start_1', 'text_end_1', 'date_1',
+                      'text_2', 'text_tokenized_2', 'token_idx_2', 'text_start_2', 'text_end_2', 'date_2'],
+            data_url="https://huggingface.co/datasets/cardiffnlp/super_tweet_eval/resolve/main/data/tempo_wic",
         )
     ]
 
@@ -163,6 +195,10 @@ class SuperTweetEval(datasets.GeneratorBasedBuilder):
         features["text_tokenized"] = datasets.Sequence(datasets.Value("string"))
         if self.config.name in ["tweet_intimacy", "tweet_similarity"]:
             features["label_float"] = datasets.Value("float32")
+        if self.config.name == "tempo_wic":
+            features["label_binary"] = datasets.Value("int32")
+            features["text_tokenized_1"] = datasets.Sequence(datasets.Value("string"))
+            features["text_tokenized_2"] = datasets.Sequence(datasets.Value("string"))
 
         return datasets.DatasetInfo(
             description=_SUPER_TWEET_EVAL_DESCRIPTION + "\n" + self.config.description,
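With this config registered, the new subset should load through the standard datasets API like the existing ones; a minimal sketch (config name and repo path taken from the diff above):

from datasets import load_dataset

# Load the TempoWiC subset of SuperTweetEval by its config name.
dataset = load_dataset("cardiffnlp/super_tweet_eval", "tempo_wic")
print(dataset)                      # DatasetDict with train / validation / test
print(dataset["train"][0]["word"])  # target word of the first instance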