Datasets:
Tasks:
Text Classification
Sub-tasks:
natural-language-inference
Languages:
Japanese
Size:
10K - 100K
License:
:sparkles: Add the builder script
Browse files- janli.py +75 -0
- poetry.lock +0 -0
- pyproject.toml +23 -0
janli.py
ADDED
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
|
3 |
+
import datasets as ds
|
4 |
+
|
5 |
+
# BibTeX entry for the paper introducing JaNLI (BlackboxNLP 2021).
_CITATION = """\
@InProceedings{yanaka-EtAl:2021:blackbox,
author = {Yanaka, Hitomi and Mineshima, Koji},
title = {Assessing the Generalization Capacity of Pre-trained Language Models through Japanese Adversarial Natural Language Inference},
booktitle = {Proceedings of the 2021 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP (BlackboxNLP2021)},
year = {2021},
}
"""

# Free-text description shown on the dataset card; deliberately left blank here.
_DESCRIPTION = """\

"""

# Upstream project page hosting the raw data.
_HOMEPAGE = "https://github.com/verypluming/JaNLI"

# License the upstream repository distributes the data under.
_LICENSE = "CC BY-SA 4.0"

# Direct URL of the single TSV file containing every example (all splits).
_DOWNLOAD_URL = "https://raw.githubusercontent.com/verypluming/JaNLI/main/janli.tsv"
25 |
+
class JaNLIDataset(ds.GeneratorBasedBuilder):
    """Loading script for the JaNLI Japanese NLI dataset.

    Downloads one TSV from GitHub and exposes it as train/test splits,
    partitioned by the file's own ``split`` column.
    """

    VERSION = ds.Version("1.0.0")

    BUILDER_CONFIGS = [
        ds.BuilderConfig(name="default", version=VERSION, description=_DESCRIPTION),
    ]

    def _info(self) -> ds.DatasetInfo:
        """Declare the schema and card metadata for the dataset."""
        return ds.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=ds.Features(
                {
                    "id": ds.Value("int64"),
                    "sentence_A_Ja": ds.Value("string"),
                    "sentence_B_Ja": ds.Value("string"),
                    "entailment_label_Ja": ds.ClassLabel(names=["entailment", "non-entailment"]),
                    "heuristics": ds.Value("string"),
                    "number_of_NPs": ds.Value("int32"),
                    "semtag": ds.Value("string"),
                }
            ),
        )

    def _split_generators(self, dl_manager: ds.DownloadManager):
        """Fetch the TSV once and hand each split its slice of the table."""
        tsv_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
        # First column of the TSV is the example index; surface it as "id".
        table: pd.DataFrame = pd.read_table(tsv_path, header=0, sep="\t", index_col=0)
        table["id"] = table.index

        def rows_for(split_name):
            # Select the rows tagged with the requested split label.
            return table[table["split"] == split_name]

        return [
            ds.SplitGenerator(name=ds.Split.TRAIN, gen_kwargs={"df": rows_for("train")}),
            ds.SplitGenerator(name=ds.Split.TEST, gen_kwargs={"df": rows_for("test")}),
        ]

    def _generate_examples(self, df: pd.DataFrame):
        """Yield ``(key, example_dict)`` pairs, dropping the bookkeeping column."""
        records = df.drop("split", axis=1).to_dict("records")
        for key, record in enumerate(records):
            yield key, record
|
poetry.lock
ADDED
The diff for this file is too large to render.
See raw diff
|
|
pyproject.toml
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Poetry project metadata for the JaNLI dataset loading script.
[tool.poetry]
name = "datasets-janli"
version = "0.1.0"
description = ""
authors = ["hppRC <hpp.ricecake@gmail.com>"]
readme = "README.md"
# No installable packages: this repository only ships the loader script.
packages = []

# Runtime dependencies.
# NOTE(review): janli.py imports pandas directly, but pandas is not declared
# here — presumably it arrives transitively via `datasets`; confirm.
[tool.poetry.dependencies]
python = "^3.8.1"
datasets = "^2.11.0"


# Development-only tooling: formatting, import sorting, linting,
# type checking, and tests.
[tool.poetry.group.dev.dependencies]
black = "^22.12.0"
isort = "^5.11.4"
flake8 = "^6.0.0"
mypy = "^0.991"
pytest = "^7.2.0"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
|