qgyd2021 committed on
Commit
beff547
1 Parent(s): f0c0724

[update]add covid_19

Browse files
README.md CHANGED
@@ -20,6 +20,7 @@ size_categories:
20
  | AFQMC | 汉语 | [AFQMC](https://tianchi.aliyun.com/dataset/106411) | TRAIN: 34334, VALID: 4316, TEST: 3861 | 蚂蚁金融语义相似度数据集,用于问题相似度计算。即:给定客服里用户描述的两句话,用算法来判断是否表示了相同的语义。 | |
21
  | BUSTM | 汉语 | [BUSTM](https://tianchi.aliyun.com/competition/entrance/531851/information); [BUSTM](https://github.com/xiaobu-coai/BUSTM) | 总样本数为:177173,其中,匹配样本个数为:54805,不匹配样本个数为:122368 | 小布助手对话短文本语义匹配比赛数据集 | [BUSTM](https://github.com/CLUEbenchmark/FewCLUE/tree/main/datasets/bustm) |
22
  | CHIP2019 | 汉语 | [CHIP2019](https://www.biendata.xyz/competition/chip2019/) | 2万 | 平安医疗科技疾病问答迁移学习比赛数据集 | |
 
23
 
24
 
25
  <details>
 
20
  | AFQMC | 汉语 | [AFQMC](https://tianchi.aliyun.com/dataset/106411) | TRAIN: 34334, VALID: 4316, TEST: 3861 | 蚂蚁金融语义相似度数据集,用于问题相似度计算。即:给定客服里用户描述的两句话,用算法来判断是否表示了相同的语义。 | |
21
  | BUSTM | 汉语 | [BUSTM](https://tianchi.aliyun.com/competition/entrance/531851/information); [BUSTM](https://github.com/xiaobu-coai/BUSTM) | 总样本数为:177173,其中,匹配样本个数为:54805,不匹配样本个数为:122368 | 小布助手对话短文本语义匹配比赛数据集 | [BUSTM](https://github.com/CLUEbenchmark/FewCLUE/tree/main/datasets/bustm) |
22
  | CHIP2019 | 汉语 | [CHIP2019](https://www.biendata.xyz/competition/chip2019/) | 2万 | 平安医疗科技疾病问答迁移学习比赛数据集 | |
23
+ | COVID-19 | 汉语 | [COVID-19](https://tianchi.aliyun.com/competition/entrance/231776/information) | | 天池新冠疫情相似句对判定大赛 | [COVID-19](https://gitee.com/liangzongchang/COVID-19-sentence-pair/) |
24
 
25
 
26
  <details>
data/covid_19.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:350a8e6c5c38760d13c9b6181b33335786585bfa9b416e71fcb242fa30ffdefd
3
+ size 2181571
examples/preprocess/process_covid_19.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python3
2
+ # -*- coding: utf-8 -*-
3
+ import argparse
4
+ import json
5
+ import os
6
+ from pathlib import Path
7
+ import sys
8
+
9
+ pwd = os.path.abspath(os.path.dirname(__file__))
10
+ sys.path.append(os.path.join(pwd, '../../'))
11
+
12
+ from datasets import load_dataset
13
+ import pandas as pd
14
+ from tqdm import tqdm
15
+
16
+ from project_settings import project_path
17
+
18
+
19
def get_args():
    """Parse command-line arguments for the COVID-19 preprocessing script.

    Returns:
        argparse.Namespace with:
            data_dir (str): directory containing the unpacked COVID-19 dataset.
            output_file (str): path of the JSON Lines file to write.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("--data_dir", default="./data/covid_19", type=str)
    parser.add_argument(
        "--output_file",
        # Wrap in str(): argparse does not run `type` on defaults, so without
        # this the attribute is a Path when defaulted but a str when the flag
        # is given on the command line.
        default=str(project_path / "data/covid_19.jsonl"),
        type=str
    )

    args = parser.parse_args()
    return args
31
+
32
+
33
def _dump_split(df, split, f):
    """Write one dataframe split to an open jsonl file handle.

    Args:
        df: pandas DataFrame with columns category, query1, query2, label.
        split: split name recorded in every output row ("train" / "validation").
        f: writable text file handle (utf-8).

    Raises:
        AssertionError: if a label is present but not "0" or "1".
    """
    for _, row in df.iterrows():
        label = row["label"]
        # pandas represents missing CSV cells as NaN (a float), never None,
        # so a `label is not None` guard can never fire and int(NaN) raises.
        # Use pd.isna to map missing labels to None as intended.
        label = None if pd.isna(label) else str(int(label))

        if label not in ("0", "1", None):
            raise AssertionError("unexpected label: {}".format(label))

        sample = {
            "sentence1": row["query1"],
            "sentence2": row["query2"],
            "label": label,
            "category": row["category"],
            "data_source": "COVID-19",
            "split": split
        }
        f.write("{}\n".format(json.dumps(sample, ensure_ascii=False)))


def main():
    """Convert the COVID-19 sentence-pair CSVs into one jsonl file.

    Reads the train and dev CSVs from the layout produced by cloning the
    COVID-19 repository into `--data_dir`, and writes both splits to
    `--output_file` as JSON Lines.
    """
    args = get_args()

    data_dir = Path(args.data_dir)

    train_filename = data_dir / "COVID-19/data/Dataset/train_20200228.csv"
    valid_filename = data_dir / "COVID-19/data/Dataset/dev_20200228.csv"

    df_train = pd.read_csv(train_filename)
    df_valid = pd.read_csv(valid_filename)

    with open(args.output_file, "w", encoding="utf-8") as f:
        _dump_split(df_train, "train", f)
        _dump_split(df_valid, "validation", f)

    return
92
+
93
+
94
+ if __name__ == '__main__':
95
+ main()
sentence_pair.py CHANGED
@@ -9,8 +9,12 @@ import datasets
9
 
10
 
11
  _URLS = {
 
 
12
  "ccks2018_task3": "data/ccks2018_task3.jsonl",
13
  "chinese_sts": "data/chinese_sts.jsonl",
 
 
14
  "diac2019": "data/diac2019.jsonl",
15
  "lcqmc": "data/lcqmc.jsonl",
16
 
 
9
 
10
 
11
  _URLS = {
12
+ "afqmc": "data/afqmc.jsonl",
13
+ "bustm": "data/bustm.jsonl",
14
  "ccks2018_task3": "data/ccks2018_task3.jsonl",
15
  "chinese_sts": "data/chinese_sts.jsonl",
16
+ "chip2019": "data/chip2019.jsonl",
17
+ "covid_19": "data/covid_19.jsonl",
18
  "diac2019": "data/diac2019.jsonl",
19
  "lcqmc": "data/lcqmc.jsonl",
20