#!/usr/bin/python3
# -*- coding: utf-8 -*-
import json
from pathlib import Path
import datasets
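# Relative paths of the per-corpus JSON Lines files shipped alongside this script.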
_URLS = {
    "ccks2018_task3": "data/ccks2018_task3.jsonl",
    "chinese_sts": "data/chinese_sts.jsonl",
    "diac2019": "data/diac2019.jsonl",
    "lcqmc": "data/lcqmc.jsonl",
}
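# Each record in these files is expected to be a JSON object carrying the keys
# consumed in _generate_examples below, e.g. (an assumed, illustrative row):
#     {"sentence1": "...", "sentence2": "...", "label": "1",
#      "data_source": "lcqmc", "split": "train"}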
_CITATION = """\
@dataset{sentence_pair,
  author    = {Xing Tian},
  title     = {sentence_pair},
  month     = sep,
  year      = 2023,
  publisher = {Xing Tian},
  version   = {1.0},
}
"""
class SentencePair(datasets.GeneratorBasedBuilder):
    """Loads several Chinese sentence-pair (text matching) corpora from JSON Lines files."""

    VERSION = datasets.Version("1.0.0")

    # Expose one BuilderConfig per corpus so each subset can be loaded by name.
    # (A plain for loop is used here because a list comprehension at class
    # scope could not reference the class-level VERSION name.)
    configs = list()
    for name in _URLS.keys():
        config = datasets.BuilderConfig(name=name, version=VERSION, description=name)
        configs.append(config)

    BUILDER_CONFIGS = [
        *configs
    ]
    def _info(self):
        # Every subset shares the same flat schema; labels are kept as strings.
        features = datasets.Features(
            {
                "sentence1": datasets.Value("string"),
                "sentence2": datasets.Value("string"),
                "label": datasets.Value("string"),
                "data_source": datasets.Value("string"),
                "split": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            features=features,
            supervised_keys=None,
            homepage="",
            license="",
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        url = _URLS[self.config.name]
        dl_path = dl_manager.download(url)
        # All three splits read the same downloaded file; _generate_examples
        # filters rows by their "split" field.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"archive_path": dl_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"archive_path": dl_path, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"archive_path": dl_path, "split": "test"},
            ),
        ]
    def _generate_examples(self, archive_path, split):
        """Yields examples."""
        archive_path = Path(archive_path)
        idx = 0
        with open(archive_path, "r", encoding="utf-8") as f:
            for row in f:
                sample = json.loads(row)
                # Keep only the rows belonging to the requested split.
                if sample["split"] != split:
                    continue
                yield idx, {
                    "sentence1": sample["sentence1"],
                    "sentence2": sample["sentence2"],
                    "label": sample["label"],
                    "data_source": sample["data_source"],
                    "split": sample["split"],
                }
                idx += 1
if __name__ == "__main__":
    pass
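# A minimal smoke test (a sketch, not part of the published script): assuming
# this file is saved locally as "sentence_pair.py" with the data/ directory
# beside it, one subset can be loaded like so:
#
#     import datasets
#     dataset = datasets.load_dataset("sentence_pair.py", name="lcqmc", split="train")
#     print(dataset[0])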