init
Browse files- process/tweet_qa.py +1 -1
- process/tweet_sim.py +1 -1
- process/tweet_topic.py +1 -1
- process/unify_sp_symbol.py +54 -39
- process/unify_sp_symbol_2.py +0 -44
process/tweet_qa.py
CHANGED
@@ -10,7 +10,7 @@ def process(tmp):
|
|
10 |
tmp = [i.to_dict() for _, i in tmp.iterrows()]
|
11 |
for i in tmp:
|
12 |
i['text'] = i.pop('paragraph_question')
|
13 |
-
i['
|
14 |
return tmp
|
15 |
|
16 |
train = process(data["train"].to_pandas())
|
|
|
10 |
tmp = [i.to_dict() for _, i in tmp.iterrows()]
|
11 |
for i in tmp:
|
12 |
i['text'] = i.pop('paragraph_question')
|
13 |
+
i['gold_label_str'] = i.pop('answer')
|
14 |
return tmp
|
15 |
|
16 |
train = process(data["train"].to_pandas())
|
process/tweet_sim.py
CHANGED
@@ -4,7 +4,7 @@ import pandas as pd
|
|
4 |
|
5 |
df = pd.read_csv("misc/tweet_sim.3anns-1k.tsv", sep="\t", index_col=None, header=None)
|
6 |
tmp = [i.tolist() for i in df.values]
|
7 |
-
tmp = [{"text_1": a, "text_2": b, "
|
8 |
seed(42)
|
9 |
shuffle(tmp)
|
10 |
train = tmp[:450]
|
|
|
4 |
|
5 |
df = pd.read_csv("misc/tweet_sim.3anns-1k.tsv", sep="\t", index_col=None, header=None)
|
6 |
tmp = [i.tolist() for i in df.values]
|
7 |
+
tmp = [{"text_1": a, "text_2": b, "gold_score": c} for a, b, c in tmp]
|
8 |
seed(42)
|
9 |
shuffle(tmp)
|
10 |
train = tmp[:450]
|
process/tweet_topic.py
CHANGED
@@ -10,7 +10,7 @@ def process(tmp):
|
|
10 |
tmp.pop("label_name")
|
11 |
tmp = [i.to_dict() for _, i in tmp.iterrows()]
|
12 |
for i in tmp:
|
13 |
-
i['
|
14 |
return tmp
|
15 |
|
16 |
train = process(data["train_2020"].to_pandas())
|
|
|
10 |
tmp.pop("label_name")
|
11 |
tmp = [i.to_dict() for _, i in tmp.iterrows()]
|
12 |
for i in tmp:
|
13 |
+
i['gold_label_list'] = i.pop('label').tolist()
|
14 |
return tmp
|
15 |
|
16 |
train = process(data["train_2020"].to_pandas())
|
process/unify_sp_symbol.py
CHANGED
@@ -3,42 +3,57 @@ import re
|
|
3 |
|
4 |
from glob import glob
|
5 |
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
#
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3 |
|
4 |
from glob import glob
|
5 |
|
6 |
+
for path in glob("data/tweet_intimacy/*.jsonl"):
    # Read one JSONL file (one JSON object per line).
    with open(path) as f:
        records = [json.loads(line) for line in f]
    for record in records:
        for col in ['text']:
            text = record[col]
            # Bare token "http" -> URL placeholder. NOTE(review): \b...\b only
            # matches the isolated word "http", not a full URL — presumably the
            # URLs were masked upstream; confirm against the raw data.
            text = re.sub(r"\bhttp\b", "{{URL}}", text)
            text = re.sub(r"@user", "{{USERNAME}}", text)
            # Wrap any remaining @-mention as {@mention@}.
            text = re.sub(r"(@[\S]+)", r"{\1@}", text)
            record[col] = text

    # Rewrite the file in place, one JSON object per line.
    with open(path, "w") as f:
        f.write("\n".join([json.dumps(record) for record in records]))

for path in glob("data/tweet_qa/*.jsonl"):
    with open(path) as f:
        records = [json.loads(line) for line in f]
    for record in records:
        for col in ['text', "paragraph", "question", "label_str"]:
            text = record[col]
            text = re.sub(r"(@[\S]+)", r"{\1@}", text)
            # A mention captured with a trailing ')' becomes "...)@}"; move
            # the ')' outside the wrapper.
            text = text.replace(")@}", '@})')
            record[col] = text

    with open(path, "w") as f:
        f.write("\n".join([json.dumps(record) for record in records]))

for path in glob("data/tweet_similarity/*.jsonl"):
    with open(path) as f:
        records = [json.loads(line) for line in f]
    for record in records:
        for col in ['text_1', "text_2"]:
            text = record[col]
            # tmp = re.sub(r"(@[\S]+)\b", r"{\1@}", tmp)
            text = text.replace("{@user@}", "{{USERNAME}}")
            record[col] = text

    with open(path, "w") as f:
        f.write("\n".join([json.dumps(record) for record in records]))


for path in glob("data/tweet_intimacy/*.jsonl"):
    # Second pass over tweet_intimacy: revert the URL placeholder to "@url",
    # then re-apply the username/mention wrapping on top.
    with open(path) as f:
        records = [json.loads(line) for line in f]
    for record in records:
        for col in ['text']:
            text = record[col].replace("{{URL}}", "@url")
            # tmp = re.sub(r"\bhttp\b", "{{URL}}", tmp)
            text = re.sub(r"@user", "{{USERNAME}}", text)
            text = re.sub(r"(@[\S]+)", r"{\1@}", text)
            record[col] = text

    with open(path, "w") as f:
        f.write("\n".join([json.dumps(record) for record in records]))
|
process/unify_sp_symbol_2.py
DELETED
@@ -1,44 +0,0 @@
|
|
1 |
-
import json
import re

from glob import glob

for path in glob("data/tweet_intimacy/*.jsonl"):
    # Read one JSONL file (one JSON object per line).
    with open(path) as f:
        records = [json.loads(line) for line in f]
    for record in records:
        for col in ['text']:
            # Revert the URL placeholder back to "@url" before re-wrapping.
            text = record[col].replace("{{URL}}", "@url")
            # tmp = re.sub(r"\bhttp\b", "{{URL}}", tmp)
            text = re.sub(r"@user", "{{USERNAME}}", text)
            # Wrap any remaining @-mention as {@mention@}.
            text = re.sub(r"(@[\S]+)", r"{\1@}", text)
            record[col] = text

    # Rewrite the file in place, one JSON object per line.
    with open(path, "w") as f:
        f.write("\n".join([json.dumps(record) for record in records]))

# for i in glob("data/tweet_qa/*.jsonl"):
#     with open(i) as f:
#         data = [json.loads(j) for j in f.readlines()]
#     for d in data:
#         for c in ['text', "paragraph", "question", "label_str"]:
#             tmp = d[c]
#             tmp = re.sub(r"(@[\S]+)", r"{\1@}", tmp)
#             tmp = tmp.replace(")@}", '@})')
#             d[c] = tmp
#
#     with open(i, "w") as f:
#         f.write("\n".join([json.dumps(j) for j in data]))

# for i in glob("data/tweet_similarity/*.jsonl"):
#     with open(i) as f:
#         data = [json.loads(j) for j in f.readlines()]
#     for d in data:
#         for c in ['text_1', "text_2"]:
#             tmp = d[c]
#             # tmp = re.sub(r"(@[\S]+)\b", r"{\1@}", tmp)
#             tmp = tmp.replace("{@user@}", "{{USERNAME}}")
#             d[c] = tmp
#
#     with open(i, "w") as f:
#         f.write("\n".join([json.dumps(j) for j in data]))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|