qgyd2021 committed
Commit f0c0724
1 Parent(s): 1ffbd69

[update]add data

README.md CHANGED
@@ -18,18 +18,16 @@ size_categories:
 | DIAC2019 | Chinese | [DIAC2019](https://www.biendata.xyz/competition/2019diac/data/) | 6K | Provided as question groups, each split into an equivalent part and a non-equivalent part; pairing equivalent questions with each other yields positive samples, and pairing equivalent with non-equivalent questions yields negative samples. A training set of 6,000 question groups is provided. | |
 | LCQMC | Chinese | [LCQMC](https://www.luge.ai/#/luge/dataDetail?id=14); [C18-1166.pdf](https://aclanthology.org/C18-1166.pdf) | TRAIN: 238766, VALID: 8802, TEST: 12500 | A Chinese question-matching dataset in the Baidu Knows domain, built to address the lack of large-scale Chinese question-matching data; the pairs are extracted from user questions across different Baidu Knows domains. | [lcqmc_data](https://github.com/xiaohai-AI/lcqmc_data) |
 | AFQMC | Chinese | [AFQMC](https://tianchi.aliyun.com/dataset/106411) | TRAIN: 34334, VALID: 4316, TEST: 3861 | Ant Financial semantic similarity dataset for question similarity: given two sentences written by users in customer service, an algorithm judges whether they express the same meaning. | |
+| BUSTM | Chinese | [BUSTM](https://tianchi.aliyun.com/competition/entrance/531851/information); [BUSTM](https://github.com/xiaobu-coai/BUSTM) | 177173 samples in total: 54805 matched, 122368 unmatched | Dataset of the XiaoBu Assistant dialogue short-text semantic matching competition. | [BUSTM](https://github.com/CLUEbenchmark/FewCLUE/tree/main/datasets/bustm) |
+| CHIP2019 | Chinese | [CHIP2019](https://www.biendata.xyz/competition/chip2019/) | 20K | Dataset of the Ping An Medical Technology disease question-answering transfer learning competition. | |
 
 
 <details>
 <summary>Referenced data sources (expand to view)</summary>
 <pre><code>
-
 https://github.com/liucongg/NLPDataSet
 
 https://huggingface.co/datasets/tiansz/ChineseSTS
 https://zhuanlan.zhihu.com/p/454173790
-
-
 </code></pre>
 </details>
-
data/afqmc.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1c4a14884906f2b8bb1ccf917f55b1adc232d8a1557f514dce644e63bb416bc1
-size 7283904
+oid sha256:6981c3caf19a0c691d2edb503030cb8dc06614087f0af1280b7cd14aa9b0fa43
+size 8049102
data/bustm.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2dc6e70479aa6e6a5f33a659b7638312e60690f0dfa6cf5d838b9ee54838def1
+size 1346323
data/ccks2018_task3.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8b8a79ef5e8d50ca2cc7429327b9c805c21760e9229ca906307064566fb8280
-size 37594914
+oid sha256:3c8c16081a423923e7289daea30fc16e1c4aa2f083da70583c47074d377a70ec
+size 41554914
data/chinese_sts.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ebd3abfde24771ae1ebab39559053aee1ed40e6430d0e1bd3f27f7b64a3c908f
-size 6450047
+oid sha256:a03523b11d7009aa7c54a7b2dfb641c71b66e8f18cfa5ac61229ace85d99c516
+size 6895331
data/chip2019.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:243e646755da05f3de0ed675694c77b72f2c27eb1f8406fa5ad68bd006b89c54
+size 16187426
data/diac2019.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a4fa97c4f00dc3448324170d42652d9453bda7fb2b63ead01ad7bf5b9bb574e5
-size 33965412
+oid sha256:42a9249c77676ec45a711cc7fc32c85e890e1b38a7a3d14be5b23df9b71b9145
+size 36910626
data/lcqmc.jsonl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2c1b93d2bb210aec9056065a4138aa1584db2f3aa9555450a3079409660745dc
-size 40905940
+oid sha256:160bb01f58111b57f139dd569283d6809cb4ccfc6b5a85b0938a403e9b18e594
+size 45587164
examples/preprocess/process_afqmc.py CHANGED
@@ -66,7 +66,8 @@ def main():
             "sentence1": sentence1,
             "sentence2": sentence2,
             "label": label,
-            "data_source": "lcqmc",
+            "category": None,
+            "data_source": "afqmc",
             "split": flag
         }
 
examples/preprocess/process_bustm.py ADDED
@@ -0,0 +1,91 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+import argparse
+import json
+import os
+from pathlib import Path
+import sys
+
+pwd = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.join(pwd, '../../'))
+
+from datasets import load_dataset
+from lxml import etree
+import pandas as pd
+from tqdm import tqdm
+
+from project_settings import project_path
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("--data_dir", default="./data/bustm", type=str)
+    parser.add_argument(
+        "--output_file",
+        default=(project_path / "data/bustm.jsonl"),
+        type=str
+    )
+
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = get_args()
+    data_dir = Path(args.data_dir)
+
+    with open(args.output_file, "w", encoding="utf-8") as fout:
+        for name in ["train_few_all.json", "dev_few_all.json", "test_public.json", "test.json", "unlabeled.json"]:
+            filename = data_dir / name
+            with open(filename.as_posix(), "r", encoding="utf-8") as fin:
+                for row in fin:
+                    row = json.loads(row)
+                    sentence1 = row["sentence1"]
+                    sentence2 = row["sentence2"]
+
+                    if name == "train_few_all.json":
+                        label = row["label"]
+                        category = None
+                        flag = "train"
+                    elif name == "dev_few_all.json":
+                        label = row["label"]
+                        category = None
+                        flag = "validation"
+                    elif name == "test_public.json":
+                        label = row["label"]
+                        category = "test_labeled"
+                        flag = "test"
+                    elif name == "test.json":
+                        label = None
+                        category = "test_unlabeled"
+                        flag = "test"
+                    elif name == "unlabeled.json":
+                        label = None
+                        category = "unlabeled"
+                        flag = "test"
+                    else:
+                        raise AssertionError
+
+                    label = str(int(label)) if label is not None else None
+
+                    if label not in ("0", "1", None):
+                        raise AssertionError
+
+                    row = {
+                        "sentence1": sentence1,
+                        "sentence2": sentence2,
+                        "label": label,
+                        "category": category,
+                        "data_source": "bustm",
+                        "split": flag
+                    }
+
+                    row = json.dumps(row, ensure_ascii=False)
+                    fout.write("{}\n".format(row))
+
+    return
+
+
+if __name__ == '__main__':
+    main()
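Because the BUSTM test and unlabeled portions are exported with label set to null and are only tagged through category ("test_unlabeled" / "unlabeled"), a consumer that wants supervised pairs can filter on the label field. A rough sketch, assuming the output file generated by the script above:

import json

labeled_rows = []
with open("data/bustm.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        # Keep only rows with a gold label (train, validation, test_public).
        if row["label"] is not None:
            labeled_rows.append(row)

print(len(labeled_rows))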
examples/preprocess/process_ccks2018_task3.py CHANGED
@@ -65,6 +65,7 @@ def main():
             "sentence1": sentence1,
             "sentence2": sentence2,
             "label": label,
+            "category": None,
             "data_source": "ccks2018_task3",
             "split": flag
         }
examples/preprocess/process_chinese_sts.py CHANGED
@@ -64,6 +64,7 @@ def main():
             "sentence1": sentence1,
             "sentence2": sentence2,
             "label": label,
+            "category": None,
             "data_source": "ChineseSTS",
             "split": "train"
         }
examples/preprocess/process_chip2019.py ADDED
@@ -0,0 +1,81 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+import argparse
+import json
+import os
+from pathlib import Path
+import sys
+
+pwd = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.join(pwd, '../../'))
+
+from datasets import load_dataset
+import pandas as pd
+from tqdm import tqdm
+
+from project_settings import project_path
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("--data_dir", default="./data/chip2019", type=str)
+    parser.add_argument(
+        "--output_file",
+        default=(project_path / "data/chip2019.jsonl"),
+        type=str
+    )
+
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = get_args()
+
+    data_dir = Path(args.data_dir)
+
+    with open(args.output_file, "w", encoding="utf-8") as f:
+        for name in ["train.csv", "dev_id.csv", "test_final.csv"]:
+            filename = data_dir / name
+
+            df = pd.read_csv(filename.as_posix())
+            for i, row in df.iterrows():
+                question1 = row["question1"]
+                question2 = row["question2"]
+                category = row["category"]
+
+                if name == "train.csv":
+                    label = row["label"]
+                    flag = "train"
+                elif name == "dev_id.csv":
+                    label = None
+                    flag = "validation"
+                elif name == "test_final.csv":
+                    label = None
+                    flag = "test"
+                else:
+                    raise AssertionError
+
+                label = str(int(label)) if label is not None else None
+
+                if label not in ("0", "1", None):
+                    raise AssertionError
+
+                row = {
+                    "sentence1": question1,
+                    "sentence2": question2,
+                    "label": label,
+                    "category": category,
+                    "data_source": "chip2019",
+                    "split": flag
+                }
+
+                row = json.dumps(row, ensure_ascii=False)
+                f.write("{}\n".format(row))
+
+    return
+
+
+if __name__ == '__main__':
+    main()
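Unlike the other sources, CHIP2019 carries a per-pair disease category taken straight from the competition CSVs, and the script above writes it into the category field, so the export can be sliced by medical domain. A small sketch under the same assumptions (data/chip2019.jsonl produced by the script above):

import json
from collections import Counter

category_counts = Counter()
with open("data/chip2019.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        category_counts[json.loads(line)["category"]] += 1

# Number of exported sentence pairs per disease category.
print(category_counts.most_common())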
examples/preprocess/process_diac2019.py CHANGED
@@ -76,6 +76,7 @@ def main():
             "sentence1": q1,
             "sentence2": q2,
             "label": label,
+            "category": None,
             "data_source": "diac2019",
             "split": "train"
         }
@@ -95,6 +96,7 @@ def main():
             "sentence1": question1,
             "sentence2": question2,
             "score": "1",
+            "category": None,
             "data_source": "diac2019",
             "split": "validation"
         }
@@ -114,6 +116,7 @@ def main():
             "sentence1": question1,
             "sentence2": question2,
             "score": None,
+            "category": None,
             "data_source": "diac2019",
             "split": "test"
         }
examples/preprocess/process_lcqmc.py CHANGED
@@ -67,6 +67,7 @@ def main():
             "sentence1": q1,
             "sentence2": q2,
             "label": label,
+            "category": None,
             "data_source": "lcqmc",
             "split": flag
         }
sentence_pair.py CHANGED
@@ -47,6 +47,7 @@ class SentencePair(datasets.GeneratorBasedBuilder):
                 "sentence1": datasets.Value("string"),
                 "sentence2": datasets.Value("string"),
                 "label": datasets.Value("string"),
+                "category": datasets.Value("string"),
                 "data_source": datasets.Value("string"),
                 "split": datasets.Value("string"),
             }
@@ -97,6 +98,7 @@ class SentencePair(datasets.GeneratorBasedBuilder):
                 "sentence1": sample["sentence1"],
                 "sentence2": sample["sentence2"],
                 "label": sample["label"],
+                "category": sample["category"],
                 "data_source": sample["data_source"],
                 "split": sample["split"],
             }
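With category added to the datasets.Features in sentence_pair.py, the field is exposed whenever the dataset is loaded through this loading script. A hedged sketch using the local script path (the exact Hub repository id is not part of this diff, and the subset name "bustm" is assumed to follow the data_source values used by the preprocessing scripts):

from datasets import load_dataset

# Load one subset through the local loading script; the config name and split
# used here are assumptions, not taken from this diff.
dataset = load_dataset("sentence_pair.py", name="bustm", split="train")

print(dataset.features)        # now includes "category" as a string feature
print(dataset[0]["category"])  # None for sources without a native category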