init
- data/tempo_wic/test.jsonl +2 -2
- data/tempo_wic/train.jsonl +2 -2
- data/tempo_wic/validation.jsonl +2 -2
- data/tweet_intimacy/test.jsonl +2 -2
- data/tweet_intimacy/train.jsonl +2 -2
- data/tweet_intimacy/validation.jsonl +2 -2
- process/tempo_wic.py +8 -3
- process/tweet_intimacy.py +2 -2
- process/tweet_ner.py +4 -1
- process/unify_sp_symbol_2.py +44 -0
data/tempo_wic/test.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:975f67ba599362ef3bea5d749932abe572f2443e73b1d8e4e8a753cd5acd6162
+size 1284151
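Note: these .jsonl files are Git LFS pointer files, so the diff above changes only the stored sha256 oid and byte size; the payload is fetched separately (e.g. via git lfs pull). A minimal sketch of checking a pulled file against its pointer, in Python (path taken from this commit; assumes the pointer has already been replaced by the real payload):

import hashlib

with open("data/tempo_wic/test.jsonl", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
# should equal the oid in the pointer above
print(digest == "975f67ba599362ef3bea5d749932abe572f2443e73b1d8e4e8a753cd5acd6162")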
data/tempo_wic/train.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:3972efcca73ec01c81feceec04e8fa2efa9e00d33f827decb8e8a7033c48507d
+size 1439969
data/tempo_wic/validation.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b44124d18d379d1b8330a2c3872cd5ea0eec0b6fdac2d3eb8d268f652a274b6c
+size 347362
data/tweet_intimacy/test.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:38e723fe91f2f18ec346c82aff75ac7323fc4098b789d9b266d581945c1be37a
+size 38638
data/tweet_intimacy/train.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:1af59bd18d4443e1276a7462e8fd15ddf62f62236a16b54770203e7ac840879b
+size 119898
data/tweet_intimacy/validation.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:56f3b4d36f87ca43a7eb3033775194169ef3e48bf11ac6c4b943ae194f035357
+size 40087
process/tempo_wic.py
CHANGED
@@ -22,12 +22,17 @@ for s in ['train', 'validation', 'test']:
     data_jl = []
     for _, i in data.iterrows():
         i = i.to_dict()
-        tmp = {"
+        tmp = {"word": i["word"], "gold_label_binary": i["label"]}
         tmp.update({f"{k}_1": v for k, v in i['tweet1'].items()})
-        tmp.update({f"{k}_2": v for k, v in i['tweet2'].items()})
         tmp['text_1_tokenized'] = tmp.pop('tokens_1')
+        tmp.update({f"{k}_2": v for k, v in i['tweet2'].items()})
         tmp['text_2_tokenized'] = tmp.pop('tokens_2')
+        tmp.pop("id")
+        tmp.pop("text_start_1")
+        tmp.pop("text_end_1")
+        tmp.pop("text_start_2")
+        tmp.pop("text_end_2")
         data_jl.append(tmp)
+
     with open(f"data/tempo_wic/{s}.jsonl", "w") as f:
         f.write("\n".join([json.dumps(i) for i in data_jl]))
-
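The reworked loop flattens each TempoWiC pair into one record keyed by word and gold_label_binary, suffixes the two tweets' fields with _1/_2, renames tokens to text_*_tokenized, and drops the span/id bookkeeping fields. A minimal sketch of reading a split back (only keys set explicitly in the diff are assumed):

import json

with open("data/tempo_wic/train.jsonl") as f:
    records = [json.loads(line) for line in f]
# fields written by the loop above
print(records[0]["word"], records[0]["gold_label_binary"])
print(records[0]["text_1_tokenized"][:5])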
process/tweet_intimacy.py
CHANGED
@@ -9,14 +9,14 @@ df_test = df_test[df_test['language'] == 'English']
 df_test.pop("language")
 test = [i.to_dict() for _, i in df_test.iterrows()]
 for i in test:
-    i['
+    i['gold_score'] = i.pop("label")
 
 df_train = pd.read_csv("misc/multilingual_tweet_intimacy/train.csv")
 df_train = df_train[df_train['language'] == 'English']
 df_train.pop("language")
 train = [i.to_dict() for _, i in df_train.iterrows()]
 for i in train:
-    i['
+    i['gold_score'] = i.pop("label")
 seed(42)
 shuffle(train)
 val = train[:len(test)]
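Both loops apply the same rename, label to gold_score, in place. A toy illustration of the idiom (the example row is made up):

row = {"text": "example tweet", "label": 2.6}
row["gold_score"] = row.pop("label")  # move the value under the new key
assert row == {"text": "example tweet", "gold_score": 2.6}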
process/tweet_ner.py
CHANGED
@@ -10,10 +10,13 @@ def process(tmp):
     tmp.pop("label_name")
     tmp = [i.to_dict() for _, i in tmp.iterrows()]
     for i in tmp:
-        i
+        i.pop("id")
+        i['gold_label_sequence'] = i.pop('tags').tolist()
         i['text_tokenized'] = i.pop('token').tolist()
+        i['text'] = ' '.join(i['text_tokenized'])
     return tmp
 
+
 train = process(data["train_2020"].to_pandas())
 val = process(data["validation_2020"].to_pandas())
 test = process(data["test_2021"].to_pandas())
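process() now emits, per example, a token list, a parallel tag-id sequence, and a joined text string. A made-up record showing the token/tag alignment the added lines rely on (token strings and tag ids are illustrative):

rec = {
    "text_tokenized": ["Messi", "joins", "PSG"],
    "gold_label_sequence": [1, 0, 3],  # hypothetical tag ids
}
rec["text"] = " ".join(rec["text_tokenized"])  # mirrors the i['text'] line above
# one tag per token, by construction of the source columns
assert len(rec["gold_label_sequence"]) == len(rec["text_tokenized"])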
process/unify_sp_symbol_2.py
ADDED
@@ -0,0 +1,44 @@
+import json
+import re
+
+from glob import glob
+
+for i in glob("data/tweet_intimacy/*.jsonl"):
+    with open(i) as f:
+        data = [json.loads(j) for j in f.readlines()]
+    for d in data:
+        for c in ['text']:
+            tmp = d[c].replace("{{URL}}", "@url")
+            # tmp = re.sub(r"\bhttp\b", "{{URL}}", tmp)
+            tmp = re.sub(r"@user", "{{USERNAME}}", tmp)
+            tmp = re.sub(r"(@[\S]+)", r"{\1@}", tmp)
+            d[c] = tmp
+
+    with open(i, "w") as f:
+        f.write("\n".join([json.dumps(j) for j in data]))
+
+# for i in glob("data/tweet_qa/*.jsonl"):
+#     with open(i) as f:
+#         data = [json.loads(j) for j in f.readlines()]
+#     for d in data:
+#         for c in ['text', "paragraph", "question", "label_str"]:
+#             tmp = d[c]
+#             tmp = re.sub(r"(@[\S]+)", r"{\1@}", tmp)
+#             tmp = tmp.replace(")@}", '@})')
+#             d[c] = tmp
+#
+#     with open(i, "w") as f:
+#         f.write("\n".join([json.dumps(j) for j in data]))
+
+# for i in glob("data/tweet_similarity/*.jsonl"):
+#     with open(i) as f:
+#         data = [json.loads(j) for j in f.readlines()]
+#     for d in data:
+#         for c in ['text_1', "text_2"]:
+#             tmp = d[c]
+#             # tmp = re.sub(r"(@[\S]+)\b", r"{\1@}", tmp)
+#             tmp = tmp.replace("{@user@}", "{{USERNAME}}")
+#             d[c] = tmp
+#
+#     with open(i, "w") as f:
+#         f.write("\n".join([json.dumps(j) for j in data]))
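For the active tweet_intimacy block, tracing the three substitutions on a made-up input shows the bracketing convention they produce:

import re

tmp = "{{URL}} thanks @user and @WHO!"      # made-up sample tweet
tmp = tmp.replace("{{URL}}", "@url")         # "@url thanks @user and @WHO!"
tmp = re.sub(r"@user", "{{USERNAME}}", tmp)  # "@url thanks {{USERNAME}} and @WHO!"
tmp = re.sub(r"(@[\S]+)", r"{\1@}", tmp)     # "{@url@} thanks {{USERNAME}} and {@WHO!@}"
print(tmp)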