# NOTE(review): the three lines below are scrape artifacts (file-size banner,
# commit hash, and the line-number gutter from the page this was copied from),
# not Python. Commented out so the module parses; content preserved verbatim.
# File size: 1,450 Bytes
# 5399d79 |
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 |
import json
import re
from glob import glob
# Compiled once at module level instead of on every record.
_USER_RE = re.compile(r"@user\b")  # anonymized-username token; \b keeps real
                                   # handles like "@userfoo" intact (bug fix:
                                   # the bare pattern rewrote their prefix).
_HANDLE_RE = re.compile(r"(@\S+)")  # any remaining @-mention


def clean_text(text):
    r"""Normalize placeholder tokens in one tweet's text.

    Transformation order matters:
      1. ``{{URL}}`` -> ``@url`` so step 3 wraps it as ``{@url@}``.
      2. the anonymized token ``@user`` -> ``{{USERNAME}}`` (word-boundary
         guarded so longer handles such as ``@userfoo`` are left for step 3).
      3. every remaining ``@handle`` -> ``{@handle@}``.

    :param text: raw tweet text.
    :return: text with placeholders rewritten as above.
    """
    text = text.replace("{{URL}}", "@url")
    text = _USER_RE.sub("{{USERNAME}}", text)
    return _HANDLE_RE.sub(r"{\1@}", text)


# Rewrite every tweet_intimacy jsonl file in place: load all records,
# normalize the 'text' column, write the file back (newline-joined, matching
# the original output format — no trailing newline).
for path in glob("data/tweet_intimacy/*.jsonl"):
    with open(path) as f:
        records = [json.loads(line) for line in f]
    for record in records:
        record["text"] = clean_text(record["text"])
    with open(path, "w") as f:
        f.write("\n".join(json.dumps(r) for r in records))
# for i in glob("data/tweet_qa/*.jsonl"):
# with open(i) as f:
# data = [json.loads(j) for j in f.readlines()]
# for d in data:
# for c in ['text', "paragraph", "question", "label_str"]:
# tmp = d[c]
# tmp = re.sub(r"(@[\S]+)", r"{\1@}", tmp)
# tmp = tmp.replace(")@}", '@})')
# d[c] = tmp
#
# with open(i, "w") as f:
# f.write("\n".join([json.dumps(j) for j in data]))
# for i in glob("data/tweet_similarity/*.jsonl"):
# with open(i) as f:
# data = [json.loads(j) for j in f.readlines()]
# for d in data:
# for c in ['text_1', "text_2"]:
# tmp = d[c]
# # tmp = re.sub(r"(@[\S]+)\b", r"{\1@}", tmp)
# tmp = tmp.replace("{@user@}", "{{USERNAME}}")
# d[c] = tmp
#
# with open(i, "w") as f:
# f.write("\n".join([json.dumps(j) for j in data]))
# (stray line-number-gutter character from the scrape removed — was "|")