[update]add code
- .gitattributes +1 -0
- .gitignore +12 -0
- README.md +13 -19
- examples/preprocess/process_afqmc.py +80 -0
- examples/preprocess/process_ccks2018_task3.py +79 -0
- examples/preprocess/process_chinese_sts.py +78 -0
- examples/preprocess/process_diac2019.py +128 -0
- examples/preprocess/process_lcqmc.py +81 -0
- main.py +16 -0
- project_settings.py +12 -0
- requirements.txt +7 -0
- sentence_pair.py +107 -0
.gitattributes
CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.jpg filter=lfs diff=lfs merge=lfs -text
 *.jpeg filter=lfs diff=lfs merge=lfs -text
 *.webp filter=lfs diff=lfs merge=lfs -text
+*.jsonl filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED

.git/
.idea/

data/
examples/preprocess/data/
hub_datasets/

**/__pycache__/

**/*.csv
**/*.xlsx
README.md
CHANGED
@@ -11,30 +11,24 @@ size_categories:
 ## Sentence Pair Dataset
 
 The datasets were collected and organized from the web as follows:
-| Dataset | Original data / project URL | Samples | Original data description | Mirror download |
-| :--- | :---: | :---: | :---: | :---: |
-
+| Dataset | Language | Original data / project URL | Samples | Original data description | Mirror download |
+| :--- | :---: | :---: | :---: | :---: | :---: |
+| ChineseSTS | Chinese | [ChineseSTS](https://github.com/IAdmireu/ChineseSTS) | 24.7K | STS Chinese semantic textual similarity (shuffle the dataset before use) | [ChineseSTS](https://huggingface.co/datasets/tiansz/ChineseSTS) |
+| ccks2018_task3 | Chinese | [CCKS2018_3](https://www.biendata.xyz/competition/CCKS2018_3/data/) | 100K | CCKS 2018 WeBank intelligent customer-service question-matching competition | |
+| DIAC2019 | Chinese | [DIAC2019](https://www.biendata.xyz/competition/2019diac/data/) | 6K | Provided as question groups, each divided into an equivalent part and a non-equivalent part. Combining equivalent questions with each other yields positive samples; combining equivalent with non-equivalent questions yields negative samples. The training set contains 6000 question groups. | |
+| LCQMC | Chinese | [LCQMC](https://www.luge.ai/#/luge/dataDetail?id=14); [C18-1166.pdf](https://aclanthology.org/C18-1166.pdf) | TRAIN: 238766, VALID: 8802, TEST: 12500 | A Chinese question-matching dataset in the Baidu Knows domain, built to address the lack of large-scale question-matching datasets for Chinese. It was constructed from user questions in different Baidu Knows domains. | [lcqmc_data](https://github.com/xiaohai-AI/lcqmc_data) |
+| AFQMC | Chinese | [AFQMC](https://tianchi.aliyun.com/dataset/106411) | TRAIN: 34334, VALID: 4316, TEST: 3861 | Ant Financial semantic similarity dataset for question-similarity computation: given two sentences a user said to customer service, the task is to decide algorithmically whether they express the same meaning. | |
+
 
 <details>
 <summary>Reference data sources; expand to view</summary>
 <pre><code>
 
-https://
-
-https://huggingface.co/datasets/
-https://
-
-https://huggingface.co/datasets/SetFit/amazon_massive_intent_zh-CN
-https://huggingface.co/datasets/SetFit/amazon_massive_intent_zh-TW
-https://huggingface.co/datasets/snips_built_in_intents
-https://huggingface.co/datasets/zapsdcn/citation_intent
-https://huggingface.co/datasets/ibm/vira-intents
-https://huggingface.co/datasets/mteb/mtop_intent
-https://huggingface.co/datasets/Bhuvaneshwari/intent_classification
-https://huggingface.co/datasets/ibm/vira-intents-live
-https://huggingface.co/datasets/ebrigham/nl_banking_intents
-https://pan.baidu.com/s/19_oqY4bC_lJa_7Mc6lxU7w?pwd=v4bi
-https://gitee.com/a2798063/SMP2019/tree/master
+https://github.com/liucongg/NLPDataSet
+
+https://huggingface.co/datasets/tiansz/ChineseSTS
+https://zhuanlan.zhihu.com/p/454173790
+
 
 </code></pre>
 </details>
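Each preprocessing script below writes its source into data/*.jsonl using one shared row schema (sentence1, sentence2, label as "0"/"1" or null, data_source, split), which sentence_pair.py later reads back. A minimal sketch for inspecting one generated file, assuming data/lcqmc.jsonl has already been produced by examples/preprocess/process_lcqmc.py:

# Minimal sketch: read back one of the generated JSONL files and count
# rows per split. Assumes data/lcqmc.jsonl was already produced by
# examples/preprocess/process_lcqmc.py.
import json
from collections import Counter

counts = Counter()
with open("data/lcqmc.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        # every row carries the same five fields:
        # sentence1, sentence2, label ("0" / "1" / None), data_source, split
        counts[row["split"]] += 1

print(counts)  # e.g. Counter({'train': ..., 'test': ..., 'validation': ...})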
examples/preprocess/process_afqmc.py
ADDED
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import json
import os
from pathlib import Path
import sys

pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, '../../'))

from project_settings import project_path


def get_args():
    parser = argparse.ArgumentParser()

    parser.add_argument("--data_dir", default="./data/afqmc", type=str)
    parser.add_argument(
        "--output_file",
        default=(project_path / "data/afqmc.jsonl"),
        type=str
    )

    args = parser.parse_args()
    return args


def main():
    args = get_args()

    data_dir = Path(args.data_dir)

    with open(args.output_file, "w", encoding="utf-8") as fout:
        for name in ["train.json", "dev.json", "test.json"]:
            filename = data_dir / name

            with open(filename.as_posix(), "r", encoding="utf-8") as fin:
                for row in fin:
                    row = json.loads(row)

                    sentence1 = row["sentence1"]
                    sentence2 = row["sentence2"]

                    if name == "train.json":
                        label = row["label"]
                        flag = "train"
                    elif name == "dev.json":
                        label = row["label"]
                        flag = "validation"
                    elif name == "test.json":
                        # the AFQMC test file ships without labels
                        label = None
                        flag = "test"
                    else:
                        raise AssertionError

                    if label not in ("0", "1", None):
                        raise AssertionError

                    row = {
                        "sentence1": sentence1,
                        "sentence2": sentence2,
                        "label": label,
                        "data_source": "afqmc",
                        "split": flag
                    }

                    row = json.dumps(row, ensure_ascii=False)
                    fout.write("{}\n".format(row))

    return


if __name__ == '__main__':
    main()
examples/preprocess/process_ccks2018_task3.py
ADDED
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import json
import os
from pathlib import Path
import sys

pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, '../../'))

from project_settings import project_path


def get_args():
    parser = argparse.ArgumentParser()

    parser.add_argument("--data_dir", default="./data/ccks2018_task3", type=str)
    parser.add_argument(
        "--output_file",
        default=(project_path / "data/ccks2018_task3.jsonl"),
        type=str
    )

    args = parser.parse_args()
    return args


def main():
    args = get_args()

    data_dir = Path(args.data_dir)

    with open(args.output_file, "w", encoding="utf-8") as f:
        for name in ["task3_train.txt", "task3_dev.txt", "test_with_id.txt"]:
            filename = data_dir / name
            with open(filename, "r", encoding="utf-8") as fin:
                for row in fin:
                    row = str(row).strip()
                    splits = row.split("\t")

                    if name == "task3_train.txt":
                        sentence1, sentence2, label = splits
                        flag = "train"
                    elif name == "task3_dev.txt":
                        # keep the last two tab-separated fields
                        # (these files carry a leading id column);
                        # note: all validation rows are written with label "1"
                        sentence1, sentence2 = splits[-2:]
                        label = "1"
                        flag = "validation"
                    elif name == "test_with_id.txt":
                        sentence1, sentence2 = splits[-2:]
                        label = None
                        flag = "test"
                    else:
                        raise AssertionError

                    label = str(int(label)) if label is not None else None

                    if label not in ("0", "1", None):
                        raise AssertionError

                    row = {
                        "sentence1": sentence1,
                        "sentence2": sentence2,
                        "label": label,
                        "data_source": "ccks2018_task3",
                        "split": flag
                    }

                    row = json.dumps(row, ensure_ascii=False)
                    f.write("{}\n".format(row))

    return


if __name__ == '__main__':
    main()
examples/preprocess/process_chinese_sts.py
ADDED
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import json
import os
import sys

pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, '../../'))

from datasets import load_dataset
from tqdm import tqdm

from project_settings import project_path


def get_args():
    parser = argparse.ArgumentParser()

    parser.add_argument("--dataset_path", default="tiansz/ChineseSTS", type=str)
    parser.add_argument(
        "--dataset_cache_dir",
        default=(project_path / "hub_datasets").as_posix(),
        type=str
    )
    parser.add_argument(
        "--output_file",
        default=(project_path / "data/chinese_sts.jsonl"),
        type=str
    )

    args = parser.parse_args()
    return args


def main():
    args = get_args()

    dataset_dict = load_dataset(
        path=args.dataset_path,
        cache_dir=args.dataset_cache_dir,
    )
    print(dataset_dict)
    with open(args.output_file, "w", encoding="utf-8") as f:
        for sample in tqdm(dataset_dict["train"]):
            text = sample["text"]

            # skip the header row embedded in the data
            if text == "句子1\t句子2\t相似度":
                continue

            splits = text.split("\t")
            if len(splits) != 3:
                raise AssertionError

            sentence1 = splits[0]
            sentence2 = splits[1]
            label = splits[2]
            # labels arrive as floats such as "1.0"; normalize to "0" / "1"
            label = str(int(float(label)))

            if label not in ("0", "1"):
                raise AssertionError

            row = {
                "sentence1": sentence1,
                "sentence2": sentence2,
                "label": label,
                "data_source": "ChineseSTS",
                "split": "train"
            }

            row = json.dumps(row, ensure_ascii=False)
            f.write("{}\n".format(row))

    return


if __name__ == '__main__':
    main()
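For context on the parsing above: each tiansz/ChineseSTS row arrives as a single tab-separated `text` field, which is why the script skips the embedded header line and splits on tabs. A small standalone sketch of that step, using a made-up example line:

# Hypothetical input line, mirroring the "句子1\t句子2\t相似度" layout the
# script expects (the sentences here are invented for illustration):
line = "今天天气很好\t今天天气不错\t1.0"
sentence1, sentence2, raw_label = line.split("\t")
label = str(int(float(raw_label)))  # "1.0" -> "1", as in the script above
print(sentence1, sentence2, label)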
examples/preprocess/process_diac2019.py
ADDED
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import json
import os
from pathlib import Path
import sys

pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, '../../'))

from lxml import etree
import pandas as pd

from project_settings import project_path


def get_args():
    parser = argparse.ArgumentParser()

    parser.add_argument("--data_dir", default="./data/diac2019", type=str)
    parser.add_argument(
        "--output_file",
        default=(project_path / "data/diac2019.jsonl"),
        type=str
    )

    args = parser.parse_args()
    return args


def main():
    args = get_args()

    data_dir = Path(args.data_dir)

    with open(args.output_file, "w", encoding="utf-8") as f:
        # train_set.xml: question groups whose first child holds the
        # equivalent questions and whose second child the non-equivalent ones
        filename = data_dir / "train_set.xml"
        tree = etree.parse(filename.as_posix())
        root = tree.getroot()

        for group in root:
            equal = group[0]
            not_equal = group[1]

            equal_questions = [question.text for question in equal]
            not_equal_questions = [question.text for question in not_equal]

            pairs = set()

            # equivalent x equivalent -> positive pairs
            for q1 in equal_questions:
                for q2 in equal_questions:
                    if q1 == q2:
                        continue
                    pair = (q1, q2, 1)
                    pairs.add(pair)

            # equivalent x non-equivalent -> negative pairs
            for q1 in equal_questions:
                for q2 in not_equal_questions:
                    if q1 == q2:
                        continue
                    pair = (q1, q2, 0)
                    pairs.add(pair)

            for pair in pairs:
                q1, q2, label = pair

                label = str(label)
                if label not in ("0", "1"):
                    raise AssertionError

                json_row = {
                    "sentence1": q1,
                    "sentence2": q2,
                    "label": label,
                    "data_source": "diac2019",
                    "split": "train"
                }

                json_row = json.dumps(json_row, ensure_ascii=False)
                f.write("{}\n".format(json_row))

        # dev_set.csv; all validation rows are written with label "1"
        filename = data_dir / "dev_set.csv"
        df = pd.read_csv(filename.as_posix(), delimiter="\t")
        for i, row in df.iterrows():
            question1 = row["question1"]
            question2 = row["question2"]

            json_row = {
                "sentence1": question1,
                "sentence2": question2,
                "label": "1",
                "data_source": "diac2019",
                "split": "validation"
            }

            json_row = json.dumps(json_row, ensure_ascii=False)
            f.write("{}\n".format(json_row))

        # test_set.csv: no labels
        filename = data_dir / "test_set.csv"
        df = pd.read_csv(filename.as_posix(), delimiter="\t")
        for i, row in df.iterrows():
            question1 = row["question1"]
            question2 = row["question2"]

            json_row = {
                "sentence1": question1,
                "sentence2": question2,
                "label": None,
                "data_source": "diac2019",
                "split": "test"
            }

            json_row = json.dumps(json_row, ensure_ascii=False)
            f.write("{}\n".format(json_row))

    return


if __name__ == '__main__':
    main()
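To make the DIAC2019 pair generation concrete: a group with m equivalent and n non-equivalent questions yields up to m*(m-1) positive pairs and m*n negative pairs before de-duplication. A tiny sketch with hypothetical question ids:

# Hypothetical group: 3 equivalent questions and 2 non-equivalent ones.
equal_questions = ["q_a1", "q_a2", "q_a3"]
not_equal_questions = ["q_b1", "q_b2"]

# same set-comprehension logic as the script above
positives = {(q1, q2) for q1 in equal_questions for q2 in equal_questions if q1 != q2}
negatives = {(q1, q2) for q1 in equal_questions for q2 in not_equal_questions if q1 != q2}

print(len(positives))  # 6 == 3 * (3 - 1)
print(len(negatives))  # 6 == 3 * 2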
examples/preprocess/process_lcqmc.py
ADDED
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import json
import os
from pathlib import Path
import sys

pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, '../../'))

from project_settings import project_path


def get_args():
    parser = argparse.ArgumentParser()

    parser.add_argument("--data_dir", default="./data/lcqmc", type=str)
    parser.add_argument(
        "--output_file",
        default=(project_path / "data/lcqmc.jsonl"),
        type=str
    )

    args = parser.parse_args()
    return args


def main():
    args = get_args()

    data_dir = Path(args.data_dir)

    with open(args.output_file, "w", encoding="utf-8") as fout:
        for name in ["train.tsv", "dev.tsv", "test.tsv"]:
            filename = data_dir / name

            with open(filename.as_posix(), "r", encoding="utf-8") as fin:
                for row in fin:
                    row = str(row).strip()
                    splits = row.split("\t")

                    if name == "train.tsv":
                        q1, q2, label = splits
                        flag = "train"
                    elif name == "dev.tsv":
                        q1, q2, label = splits
                        flag = "validation"
                    elif name == "test.tsv":
                        # the test file here carries no label column
                        q1, q2 = splits
                        label = None
                        flag = "test"
                    else:
                        raise AssertionError

                    label = str(int(label)) if label is not None else None

                    if label not in ("0", "1", None):
                        raise AssertionError

                    row = {
                        "sentence1": q1,
                        "sentence2": q2,
                        "label": label,
                        "data_source": "lcqmc",
                        "split": flag
                    }

                    row = json.dumps(row, ensure_ascii=False)
                    fout.write("{}\n".format(row))

    return


if __name__ == '__main__':
    main()
main.py
ADDED
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from datasets import load_dataset


def main():
    dataset = load_dataset(
        "sentence_pair.py",
        name="ccks2018_task3",
        split="validation",
    )

    for sample in dataset:
        print(sample)
    return


if __name__ == '__main__':
    main()
project_settings.py
ADDED
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
from pathlib import Path


project_path = os.path.abspath(os.path.dirname(__file__))
project_path = Path(project_path)


if __name__ == '__main__':
    pass
requirements.txt
ADDED
datasets
tqdm==4.66.1
pandas==2.0.3
xlrd==1.2.0
openpyxl==3.0.9
fsspec==2023.9.2
lxml==4.9.3
sentence_pair.py
ADDED
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import json
from pathlib import Path

import datasets


# one JSONL file per config; each row's "split" field selects its split
_URLS = {
    "afqmc": "data/afqmc.jsonl",
    "ccks2018_task3": "data/ccks2018_task3.jsonl",
    "chinese_sts": "data/chinese_sts.jsonl",
    "diac2019": "data/diac2019.jsonl",
    "lcqmc": "data/lcqmc.jsonl",
}


_CITATION = """\
@dataset{sentence_pair,
  author  = {Xing Tian},
  title   = {sentence_pair},
  month   = sep,
  year    = 2023,
  publisher = {Xing Tian},
  version = {1.0},
}
"""


class SentencePair(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    configs = list()
    for name in _URLS.keys():
        config = datasets.BuilderConfig(name=name, version=VERSION, description=name)
        configs.append(config)

    BUILDER_CONFIGS = [
        *configs
    ]

    def _info(self):
        features = datasets.Features(
            {
                "sentence1": datasets.Value("string"),
                "sentence2": datasets.Value("string"),
                "label": datasets.Value("string"),
                "data_source": datasets.Value("string"),
                "split": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            features=features,
            supervised_keys=None,
            homepage="",
            license="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        url = _URLS[self.config.name]
        dl_path = dl_manager.download(url)
        archive_path = dl_path

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"archive_path": archive_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"archive_path": archive_path, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"archive_path": archive_path, "split": "test"},
            ),
        ]

    def _generate_examples(self, archive_path, split):
        """Yields examples."""
        archive_path = Path(archive_path)

        idx = 0

        with open(archive_path, "r", encoding="utf-8") as f:
            for row in f:
                sample = json.loads(row)

                # each config's JSONL holds all splits; keep rows matching this one
                if sample["split"] != split:
                    continue

                yield idx, {
                    "sentence1": sample["sentence1"],
                    "sentence2": sample["sentence2"],
                    "label": sample["label"],
                    "data_source": sample["data_source"],
                    "split": sample["split"],
                }
                idx += 1


if __name__ == '__main__':
    pass
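A hedged usage sketch of the loader above, in the same spirit as main.py: iterate over every registered config and pull one split. It assumes the data/*.jsonl files were produced by the preprocessing scripts in this commit:

# Sketch only: load the "train" split of each config and print its size.
from datasets import load_dataset

for name in ["afqmc", "ccks2018_task3", "chinese_sts", "diac2019", "lcqmc"]:
    # rows are routed to splits by their "split" field, so a split may be
    # empty for some configs (e.g. chinese_sts only contains "train" rows)
    dataset = load_dataset("sentence_pair.py", name=name, split="train")
    print(name, dataset.num_rows)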