[update] add ocnli

Files changed:
- README.md +3 -1
- data/chinese_mnli.jsonl +2 -2
- data/chinese_snli.jsonl +3 -0
- data/ocnli.jsonl +3 -0
- examples/preprocess/process_chinese_mnli.py +9 -3
- examples/preprocess/process_chinese_snli.py +86 -0
- examples/preprocess/process_ocnli.py +84 -0
- sentence_pair.py +2 -0
README.md CHANGED

@@ -21,7 +21,9 @@ size_categories:
 | BUSTM | Chinese | [BUSTM](https://tianchi.aliyun.com/competition/entrance/531851/information); [BUSTM](https://github.com/xiaobu-coai/BUSTM) | 177,173 samples in total: 54,805 matched, 122,368 unmatched | XiaoBu assistant dialogue short-text semantic matching competition dataset | [BUSTM](https://github.com/CLUEbenchmark/FewCLUE/tree/main/datasets/bustm) |
 | CHIP2019 | Chinese | [CHIP2019](https://www.biendata.xyz/competition/chip2019/) | 20K | Ping An Healthcare Technology disease question-answering transfer learning competition dataset | |
 | COVID-19 | Chinese | [COVID-19](https://tianchi.aliyun.com/competition/entrance/231776/information) | | Tianchi COVID-19 similar sentence pair judgment competition | [COVID-19](https://gitee.com/liangzongchang/COVID-19-sentence-pair/) |
-| Chinese-MNLI | Chinese | [Chinese-MNLI](https://github.com/pluto-junzeng/CNSD) | TRAIN: 390K, VALID: 12K, TEST: 13K | | |
+| Chinese-MNLI | Chinese | [Chinese-MNLI](https://github.com/pluto-junzeng/CNSD) | TRAIN: 390K, VALID: 12K, TEST: 13K | Generated from the original English dataset via translation plus partial manual correction (the source is an entailment/neutral/contradiction NLI dataset, converted here into sentence pairs). | |
+| Chinese-SNLI | Chinese | [Chinese-SNLI](https://github.com/pluto-junzeng/CNSD) | TRAIN: 550K, VALID: 10K, TEST: 10K | Generated from the original English dataset via translation plus partial manual correction (the source is an entailment/neutral/contradiction NLI dataset, converted here into sentence pairs). | |
+| OCNLI | Chinese | [OCNLI](https://github.com/CLUEbenchmark/OCNLI) | TRAIN: 50K, VALID: 3K, TEST: 3K | Original Chinese Natural Language Inference dataset, the first large-scale Chinese NLI dataset built from native Chinese text rather than translation. | |


 <details>
data/chinese_mnli.jsonl CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f0b87b6ebc3b8124ca4f1cbb0e5055e4941c0f8f93014248f93d82cbf617a585
+size 117078623
data/chinese_snli.jsonl ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0616f76d77f83a4bbc4ff0b0cbea1a018dbdcf8c7dc94538a062284d9626704f
+size 125893606
data/ocnli.jsonl ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:12b5f3a0e082d09f29d646024f73c870906c29adc81cdbd9c6c519fe276c3f33
+size 12092258
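The three data files above are tracked with Git LFS, so the repository only stores pointer files carrying a sha256 oid and a byte size. A quick way to verify that a locally materialized file matches its pointer, as a standalone sketch (not part of this commit; the expected values are copied from the data/ocnli.jsonl pointer above):

```python
import hashlib

# Expected oid/size copied from the data/ocnli.jsonl LFS pointer above.
expected_oid = "12b5f3a0e082d09f29d646024f73c870906c29adc81cdbd9c6c519fe276c3f33"
expected_size = 12092258

h = hashlib.sha256()
size = 0
with open("data/ocnli.jsonl", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
        size += len(chunk)

print("oid matches:", h.hexdigest() == expected_oid)
print("size matches:", size == expected_size)
```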
examples/preprocess/process_chinese_mnli.py CHANGED

@@ -45,16 +45,22 @@ def main():
                     sentence2 = row["sentence2"]
                     gold_label = row["gold_label"]

+                    if gold_label not in ("entailment", "neutral", "contradiction", "-"):
+                        print(gold_label)
+                        raise AssertionError
+
+                    if gold_label in ("-",):
+                        continue
+
+                    label = "1" if gold_label in ("entailment",) else "0"
+
                     if name == "cnsd_multil_train.jsonl":
-                        label = "1" if gold_label in ("entailment",) else "0"
                         flag = "train"
                         category = "cnsd_multil_train"
                     elif name == "cnsd_multil_dev_matched.jsonl":
-                        label = "1" if gold_label in ("entailment",) else "0"
                         flag = "validation"
                         category = "cnsd_multil_dev_matched"
                     elif name == "cnsd_multil_dev_mismatched.jsonl":
-                        label = "1" if gold_label in ("entailment",) else "0"
                         flag = "validation"
                         category = "cnsd_multil_dev_mismatched"
                     else:
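The refactor above hoists the binary label mapping out of the per-split branches and adds a guard that skips the unlabeled "-" rows. As a standalone illustration of that mapping (a sketch only; the helper name map_gold_label does not appear in the commit):

```python
from typing import Optional


def map_gold_label(gold_label: str) -> Optional[str]:
    """Sketch of the gold_label -> binary label mapping done inline above.

    "entailment" maps to "1", "neutral" and "contradiction" map to "0",
    and the unlabeled "-" rows map to None so the caller can skip them.
    """
    if gold_label not in ("entailment", "neutral", "contradiction", "-"):
        raise AssertionError(gold_label)
    if gold_label == "-":
        return None
    return "1" if gold_label == "entailment" else "0"
```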
examples/preprocess/process_chinese_snli.py ADDED

@@ -0,0 +1,86 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+import argparse
+import json
+import os
+from pathlib import Path
+import sys
+
+pwd = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.join(pwd, '../../'))
+
+from datasets import load_dataset
+from tqdm import tqdm
+
+from project_settings import project_path
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("--data_dir", default="./data/chinese_snli", type=str)
+
+    parser.add_argument(
+        "--output_file",
+        default=(project_path / "data/chinese_snli.jsonl"),
+        type=str
+    )
+
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = get_args()
+
+    data_dir = Path(args.data_dir)
+
+    with open(args.output_file, "w", encoding="utf-8") as fout:
+        for name in ["cnsd_snli_v1.0.train.jsonl", "cnsd_snli_v1.0.dev.jsonl", "cnsd_snli_v1.0.test.jsonl"]:
+            filename = data_dir / name
+            with open(filename, "r", encoding="utf-8") as fin:
+                for row in fin:
+                    row = json.loads(row)
+                    sentence1 = row["sentence1"]
+                    sentence2 = row["sentence2"]
+                    gold_label = row["gold_label"]
+
+                    if gold_label not in ("entailment", "neutral", "contradiction", "-"):
+                        print(gold_label)
+                        raise AssertionError
+
+                    if gold_label in ("-",):
+                        continue
+
+                    label = "1" if gold_label in ("entailment",) else "0"
+                    category = gold_label
+
+                    if name == "cnsd_snli_v1.0.train.jsonl":
+                        flag = "train"
+                    elif name == "cnsd_snli_v1.0.dev.jsonl":
+                        flag = "validation"
+                    elif name == "cnsd_snli_v1.0.test.jsonl":
+                        flag = "test"
+                    else:
+                        raise AssertionError
+
+                    if label not in ("0", "1", None):
+                        raise AssertionError
+
+                    row = {
+                        "sentence1": sentence1,
+                        "sentence2": sentence2,
+                        "label": label,
+                        "category": category,
+                        "data_source": "Chinese-SNLI",
+                        "split": flag
+                    }
+
+                    row = json.dumps(row, ensure_ascii=False)
+                    fout.write("{}\n".format(row))
+
+    return
+
+
+if __name__ == '__main__':
+    main()
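The new script writes one JSON object per line with the same schema as the other converters. A small check of that schema, as a sketch (it assumes the script has already been run so that data/chinese_snli.jsonl exists locally):

```python
import json

# Peek at one converted record: the script writes sentence1, sentence2,
# label ("1"/"0"), category (the original gold label), data_source and split.
with open("data/chinese_snli.jsonl", "r", encoding="utf-8") as f:
    record = json.loads(next(f))

print(sorted(record.keys()))
# expected: ['category', 'data_source', 'label', 'sentence1', 'sentence2', 'split']
```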
examples/preprocess/process_ocnli.py ADDED

@@ -0,0 +1,84 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+import argparse
+import json
+import os
+from pathlib import Path
+import sys
+
+pwd = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.join(pwd, '../../'))
+
+from datasets import load_dataset
+from tqdm import tqdm
+
+from project_settings import project_path
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("--data_dir", default="./data/ocnli/data/ocnli", type=str)
+
+    parser.add_argument(
+        "--output_file",
+        default=(project_path / "data/ocnli.jsonl"),
+        type=str
+    )
+
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = get_args()
+
+    data_dir = Path(args.data_dir)
+
+    with open(args.output_file, "w", encoding="utf-8") as fout:
+        for name in ["train.50k.json", "dev.json", "test.json"]:
+            filename = data_dir / name
+            with open(filename, "r", encoding="utf-8") as fin:
+                for row in fin:
+                    row = json.loads(row)
+                    sentence1 = row["sentence1"]
+                    sentence2 = row["sentence2"]
+                    label = row.get("label", None)
+                    category = row.get("genre", None)
+
+                    if label not in ("entailment", "neutral", "contradiction", "-", None):
+                        print(label)
+                        raise AssertionError
+
+                    if label in ("-",):
+                        continue
+
+                    if label is not None:
+                        label = "1" if label in ("entailment",) else "0"
+
+                    if name == "train.50k.json":
+                        flag = "train"
+                    elif name == "dev.json":
+                        flag = "validation"
+                    elif name == "test.json":
+                        flag = "test"
+                    else:
+                        raise AssertionError
+
+                    row = {
+                        "sentence1": sentence1,
+                        "sentence2": sentence2,
+                        "label": label,
+                        "category": category,
+                        "data_source": "OCNLI",
+                        "split": flag
+                    }
+
+                    row = json.dumps(row, ensure_ascii=False)
+                    fout.write("{}\n".format(row))
+
+    return
+
+
+if __name__ == '__main__':
+    main()
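Unlike the SNLI/MNLI converters, this script reads the label with row.get("label", None) and keeps rows without a label, since the OCNLI test split ships without public gold labels; such rows end up with label set to null in the output. A rough sanity check, as a sketch (it assumes data/ocnli.jsonl has already been generated):

```python
import json
from collections import Counter

# Count (split, label) pairs in the converted file; the test split is
# expected to show label=None while train/validation carry "0"/"1".
counts = Counter()
with open("data/ocnli.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        counts[(row["split"], row["label"])] += 1

for key in sorted(counts, key=str):
    print(key, counts[key])
```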
sentence_pair.py CHANGED

@@ -13,11 +13,13 @@ _URLS = {
     "bustm": "data/bustm.jsonl",
     "ccks2018_task3": "data/ccks2018_task3.jsonl",
     "chinese_mnli": "data/chinese_mnli.jsonl",
+    "chinese_snli": "data/chinese_snli.jsonl",
     "chinese_sts": "data/chinese_sts.jsonl",
     "chip2019": "data/chip2019.jsonl",
     "covid_19": "data/covid_19.jsonl",
     "diac2019": "data/diac2019.jsonl",
     "lcqmc": "data/lcqmc.jsonl",
+    "ocnli": "data/ocnli.jsonl",

 }

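With the two new entries registered in _URLS, the chinese_snli and ocnli subsets can be loaded through the dataset script like the existing ones. A minimal usage sketch (not part of the commit), run from the repository root:

```python
from datasets import load_dataset

# Configuration names mirror the _URLS keys added above. Newer versions of
# `datasets` may additionally require trust_remote_code=True for script-based
# datasets.
ocnli = load_dataset("sentence_pair.py", "ocnli", split="train")
chinese_snli = load_dataset("sentence_pair.py", "chinese_snli", split="train")

print(ocnli[0])
```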