shunk031 committed
Commit 50e79c3
1 Parent(s): f782a76

Add JCoLA dataset (#13)


* [WIP] update README
* update script to load JCoLA
* update scripts
* update README
* update README
* update script
* update README
* fix for test
* update script
* update pyproject.toml
* update README

Files changed (4):
  1. JGLUE.py +245 -71
  2. README.md +213 -43
  3. pyproject.toml +0 -1
  4. tests/JGLUE_test.py +16 -0
JGLUE.py CHANGED
@@ -3,7 +3,8 @@ import logging
 import random
 import string
 import warnings
- from typing import Dict, List, Optional, Union
+ from dataclasses import dataclass
+ from typing import Dict, List, Literal, Optional
 
 import datasets as ds
 import pandas as pd
@@ -11,7 +12,7 @@ from datasets.tasks import QuestionAnsweringExtractive
 
 logger = logging.getLogger(__name__)
 
- _CITATION = """\
+ _JGLUE_CITATION = """\
 @inproceedings{kurihara-lrec-2022-jglue,
     title={JGLUE: Japanese general language understanding evaluation},
     author={Kurihara, Kentaro and Kawahara, Daisuke and Shibata, Tomohide},
@@ -20,7 +21,6 @@ _CITATION = """\
     year={2022},
     url={https://aclanthology.org/2022.lrec-1.317/}
 }
- 
 @inproceedings{kurihara-nlp-2022-jglue,
     title={JGLUE: 日本語言語理解ベンチマーク},
     author={栗原健太郎 and 河原大輔 and 柴田知秀},
@@ -32,20 +32,63 @@ _CITATION = """\
 }
 """
 
+ _JCOLA_CITATION = """\
+ @article{someya2023jcola,
+     title={JCoLA: Japanese Corpus of Linguistic Acceptability},
+     author={Taiga Someya and Yushi Sugimoto and Yohei Oseki},
+     year={2023},
+     eprint={2309.12676},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ @inproceedings{someya-nlp-2022-jcola,
+     title={日本語版 CoLA の構築},
+     author={染谷 大河 and 大関 洋平},
+     booktitle={言語処理学会第28回年次大会},
+     pages={1872--1877},
+     year={2022},
+     url={https://www.anlp.jp/proceedings/annual_meeting/2022/pdf_dir/E7-1.pdf},
+     note={in Japanese}
+ }
+ """
+ 
+ _MARC_JA_CITATION = """\
+ @inproceedings{marc_reviews,
+     title={The Multilingual Amazon Reviews Corpus},
+     author={Keung, Phillip and Lu, Yichao and Szarvas, György and Smith, Noah A.},
+     booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing},
+     pages={4563--4568},
+     year={2020}
+ }
+ """
+ 
+ _JSTS_JNLI_CITATION = """\
+ @inproceedings{miyazaki2016cross,
+     title={Cross-lingual image caption generation},
+     author={Miyazaki, Takashi and Shimizu, Nobuyuki},
+     booktitle={Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
+     pages={1780--1790},
+     year={2016}
+ }
+ """
+ 
 _DESCRIPTION = """\
 JGLUE, Japanese General Language Understanding Evaluation, \
 is built to measure the general NLU ability in Japanese. JGLUE has been constructed \
 from scratch without translation. We hope that JGLUE will facilitate NLU research in Japanese.\
 """
 
- _HOMEPAGE = "https://github.com/yahoojapan/JGLUE"
+ _JGLUE_HOMEPAGE = "https://github.com/yahoojapan/JGLUE"
+ _JCOLA_HOMEPAGE = "https://github.com/osekilab/JCoLA"
+ _MARC_JA_HOMEPAGE = "https://registry.opendata.aws/amazon-reviews-ml/"
 
- _LICENSE = """\
+ _JGLUE_LICENSE = """\
 This work is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License.\
 """
 
 _DESCRIPTION_CONFIGS = {
     "MARC-ja": "MARC-ja is a dataset of the text classification task. This dataset is based on the Japanese portion of Multilingual Amazon Reviews Corpus (MARC) (Keung+, 2020).",
+     "JCoLA": "JCoLA (Japanese Corpus of Linguistic Acceptability) is a novel dataset for targeted syntactic evaluations of language models in Japanese, which consists of 10,020 sentences with acceptability judgments by linguists.",
     "JSTS": "JSTS is a Japanese version of the STS (Semantic Textual Similarity) dataset. STS is a task to estimate the semantic similarity of a sentence pair.",
     "JNLI": "JNLI is a Japanese version of the NLI (Natural Language Inference) dataset. NLI is a task to recognize the inference relation that a premise sentence has to a hypothesis sentence.",
     "JSQuAD": "JSQuAD is a Japanese version of SQuAD (Rajpurkar+, 2016), one of the datasets of reading comprehension.",
@@ -62,6 +105,22 @@ _URLS = {
             "valid": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/preprocess/marc-ja/data/label_conv_review_id_list/valid.txt",
         },
     },
+     "JCoLA": {
+         "train": {
+             "in_domain": {
+                 "json": "https://raw.githubusercontent.com/osekilab/JCoLA/main/data/jcola-v1.0/in_domain_train-v1.0.json",
+             }
+         },
+         "valid": {
+             "in_domain": {
+                 "json": "https://raw.githubusercontent.com/osekilab/JCoLA/main/data/jcola-v1.0/in_domain_valid-v1.0.json",
+             },
+             "out_of_domain": {
+                 "json": "https://raw.githubusercontent.com/osekilab/JCoLA/main/data/jcola-v1.0/out_of_domain_valid-v1.0.json",
+                 "json_annotated": "https://raw.githubusercontent.com/osekilab/JCoLA/main/data/jcola-v1.0/out_of_domain_valid_annotated-v1.0.json",
+             },
+         },
+     },
     "JSTS": {
         "train": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jsts-v1.1/train-v1.1.json",
         "valid": "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jsts-v1.1/valid-v1.1.json",
@@ -93,9 +152,9 @@ def dataset_info_jsts() -> ds.DatasetInfo:
     )
     return ds.DatasetInfo(
         description=_DESCRIPTION,
-         citation=_CITATION,
-         homepage=_HOMEPAGE,
-         license=_LICENSE,
+         citation=f"{_JSTS_JNLI_CITATION}\n{_JGLUE_CITATION}",
+         homepage=_JGLUE_HOMEPAGE,
+         license=_JGLUE_LICENSE,
         features=features,
     )
@@ -114,9 +173,9 @@ def dataset_info_jnli() -> ds.DatasetInfo:
     )
     return ds.DatasetInfo(
         description=_DESCRIPTION,
-         citation=_CITATION,
-         homepage=_HOMEPAGE,
-         license=_LICENSE,
+         citation=f"{_JSTS_JNLI_CITATION}\n{_JGLUE_CITATION}",
+         homepage=_JGLUE_HOMEPAGE,
+         license=_JGLUE_LICENSE,
         features=features,
         supervised_keys=None,
     )
@@ -137,9 +196,9 @@ def dataset_info_jsquad() -> ds.DatasetInfo:
     )
     return ds.DatasetInfo(
         description=_DESCRIPTION,
-         citation=_CITATION,
-         homepage=_HOMEPAGE,
-         license=_LICENSE,
+         citation=_JGLUE_CITATION,
+         homepage=_JGLUE_HOMEPAGE,
+         license=_JGLUE_LICENSE,
         features=features,
         supervised_keys=None,
         task_templates=[
@@ -170,9 +229,47 @@ def dataset_info_jcommonsenseqa() -> ds.DatasetInfo:
     )
     return ds.DatasetInfo(
         description=_DESCRIPTION,
-         citation=_CITATION,
-         homepage=_HOMEPAGE,
-         license=_LICENSE,
+         citation=_JGLUE_CITATION,
+         homepage=_JGLUE_HOMEPAGE,
+         license=_JGLUE_LICENSE,
+         features=features,
+     )
+ 
+ 
+ def dataset_info_jcola() -> ds.DatasetInfo:
+     features = ds.Features(
+         {
+             "uid": ds.Value("int64"),
+             "source": ds.Value("string"),
+             "label": ds.ClassLabel(
+                 num_classes=2,
+                 names=["unacceptable", "acceptable"],
+             ),
+             "diacritic": ds.Value("string"),
+             "sentence": ds.Value("string"),
+             "original": ds.Value("string"),
+             "translation": ds.Value("string"),
+             "gloss": ds.Value("bool"),
+             "linguistic_phenomenon": {
+                 "argument_structure": ds.Value("bool"),
+                 "binding": ds.Value("bool"),
+                 "control_raising": ds.Value("bool"),
+                 "ellipsis": ds.Value("bool"),
+                 "filler_gap": ds.Value("bool"),
+                 "island_effects": ds.Value("bool"),
+                 "morphology": ds.Value("bool"),
+                 "nominal_structure": ds.Value("bool"),
+                 "negative_polarity_concord_items": ds.Value("bool"),
+                 "quantifier": ds.Value("bool"),
+                 "verbal_agreement": ds.Value("bool"),
+                 "simple": ds.Value("bool"),
+             },
+         }
+     )
+     return ds.DatasetInfo(
+         description=_DESCRIPTION,
+         citation=f"{_JCOLA_CITATION}\n{_JGLUE_CITATION}",
+         homepage=_JCOLA_HOMEPAGE,
         features=features,
     )
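The `label` feature declared in `dataset_info_jcola` above is a `ds.ClassLabel`, which stores integer ids in the underlying data and keeps the id-to-name mapping in the dataset's features; the JCoLA example generator added later in this diff can therefore yield the string names ("unacceptable"/"acceptable") and have them encoded automatically. A minimal standalone sketch of that mapping:

```python
import datasets as ds

# ClassLabel converts between integer ids and their string names in both directions.
label = ds.ClassLabel(num_classes=2, names=["unacceptable", "acceptable"])
assert label.str2int("acceptable") == 1
assert label.int2str(0) == "unacceptable"
```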
@@ -189,53 +286,43 @@ def dataset_info_marc_ja() -> ds.DatasetInfo:
     )
     return ds.DatasetInfo(
         description=_DESCRIPTION,
-         citation=_CITATION,
-         homepage=_HOMEPAGE,
-         license=_LICENSE,
+         citation=f"{_MARC_JA_CITATION}\n{_JGLUE_CITATION}",
+         homepage=_MARC_JA_HOMEPAGE,
+         license=_JGLUE_LICENSE,
         features=features,
     )
 
 
- class MarcJaConfig(ds.BuilderConfig):
-     def __init__(
-         self,
-         name: str = "MARC-ja",
-         is_han_to_zen: bool = False,
-         max_instance_num: Optional[int] = None,
-         max_char_length: int = 500,
-         is_pos_neg: bool = True,
-         train_ratio: float = 0.94,
-         val_ratio: float = 0.03,
-         test_ratio: float = 0.03,
-         output_testset: bool = False,
-         filter_review_id_list_valid: bool = True,
-         label_conv_review_id_list_valid: bool = True,
-         version: Optional[Union[ds.utils.Version, str]] = ds.utils.Version("0.0.0"),
-         data_dir: Optional[str] = None,
-         data_files: Optional[ds.data_files.DataFilesDict] = None,
-         description: Optional[str] = None,
-     ) -> None:
-         super().__init__(
-             name=name,
-             version=version,
-             data_dir=data_dir,
-             data_files=data_files,
-             description=description,
-         )
-         assert train_ratio + val_ratio + test_ratio == 1.0
+ @dataclass
+ class JGLUEConfig(ds.BuilderConfig):
+     """Class for JGLUE benchmark configuration"""
 
-         self.train_ratio = train_ratio
-         self.val_ratio = val_ratio
-         self.test_ratio = test_ratio
 
-         self.is_han_to_zen = is_han_to_zen
-         self.max_instance_num = max_instance_num
-         self.max_char_length = max_char_length
-         self.is_pos_neg = is_pos_neg
-         self.output_testset = output_testset
+ @dataclass
+ class MarcJaConfig(JGLUEConfig):
+     name: str = "MARC-ja"
+     is_han_to_zen: bool = False
+     max_instance_num: Optional[int] = None
+     max_char_length: int = 500
+     is_pos_neg: bool = True
+     train_ratio: float = 0.94
+     val_ratio: float = 0.03
+     test_ratio: float = 0.03
+     output_testset: bool = False
+     filter_review_id_list_valid: bool = True
+     label_conv_review_id_list_valid: bool = True
 
-         self.filter_review_id_list_valid = filter_review_id_list_valid
-         self.label_conv_review_id_list_valid = label_conv_review_id_list_valid
+     def __post_init__(self) -> None:
+         assert self.train_ratio + self.val_ratio + self.test_ratio == 1.0
+ 
+ 
+ JcolaDomain = Literal["in_domain", "out_of_domain"]
+ 
+ 
+ @dataclass
+ class JcolaConfig(JGLUEConfig):
+     name: str = "JCoLA"
+     domain: JcolaDomain = "in_domain"
 
 
 def get_label(rating: int, is_pos_neg: bool = False) -> Optional[str]:
@@ -451,31 +538,39 @@ def preprocess_for_marc_ja(
 
 
 class JGLUE(ds.GeneratorBasedBuilder):
-     VERSION = ds.Version("1.1.0")
+     JGLUE_VERSION = ds.Version("1.1.0")
+     JCOLA_VERSION = ds.Version("1.0.0")
+ 
+     BUILDER_CONFIG_CLASS = JGLUEConfig
     BUILDER_CONFIGS = [
         MarcJaConfig(
             name="MARC-ja",
-             version=VERSION,
+             version=JGLUE_VERSION,
             description=_DESCRIPTION_CONFIGS["MARC-ja"],
         ),
-         ds.BuilderConfig(
+         JcolaConfig(
+             name="JCoLA",
+             version=JCOLA_VERSION,
+             description=_DESCRIPTION_CONFIGS["JCoLA"],
+         ),
+         JGLUEConfig(
             name="JSTS",
-             version=VERSION,
+             version=JGLUE_VERSION,
             description=_DESCRIPTION_CONFIGS["JSTS"],
         ),
-         ds.BuilderConfig(
+         JGLUEConfig(
             name="JNLI",
-             version=VERSION,
+             version=JGLUE_VERSION,
             description=_DESCRIPTION_CONFIGS["JNLI"],
         ),
-         ds.BuilderConfig(
+         JGLUEConfig(
             name="JSQuAD",
-             version=VERSION,
+             version=JGLUE_VERSION,
             description=_DESCRIPTION_CONFIGS["JSQuAD"],
         ),
-         ds.BuilderConfig(
+         JGLUEConfig(
             name="JCommonsenseQA",
-             version=VERSION,
+             version=JGLUE_VERSION,
             description=_DESCRIPTION_CONFIGS["JCommonsenseQA"],
         ),
     ]
@@ -489,6 +584,8 @@ class JGLUE(ds.GeneratorBasedBuilder):
             return dataset_info_jsquad()
         elif self.config.name == "JCommonsenseQA":
             return dataset_info_jcommonsenseqa()
+         elif self.config.name == "JCoLA":
+             return dataset_info_jcola()
         elif self.config.name == "MARC-ja":
             return dataset_info_marc_ja()
         else:
@@ -522,24 +619,49 @@ class JGLUE(ds.GeneratorBasedBuilder):
 
         return [
             ds.SplitGenerator(
-                 name=ds.Split.TRAIN,  # type: ignore
+                 name=ds.Split.TRAIN,
                 gen_kwargs={"split_df": split_dfs["train"]},
             ),
             ds.SplitGenerator(
-                 name=ds.Split.VALIDATION,  # type: ignore
+                 name=ds.Split.VALIDATION,
                 gen_kwargs={"split_df": split_dfs["valid"]},
             ),
         ]
 
+     def __split_generators_jcola(self, dl_manager: ds.DownloadManager):
+         file_paths = dl_manager.download_and_extract(_URLS[self.config.name])
+ 
+         return [
+             ds.SplitGenerator(
+                 name=ds.Split.TRAIN,
+                 gen_kwargs={"file_path": file_paths["train"]["in_domain"]["json"]},
+             ),
+             ds.SplitGenerator(
+                 name=ds.Split.VALIDATION,
+                 gen_kwargs={"file_path": file_paths["valid"]["in_domain"]["json"]},
+             ),
+             ds.SplitGenerator(
+                 name=ds.NamedSplit("validation_out_of_domain"),
+                 gen_kwargs={"file_path": file_paths["valid"]["out_of_domain"]["json"]},
+             ),
+             ds.SplitGenerator(
+                 name=ds.NamedSplit("validation_out_of_domain_annotated"),
+                 gen_kwargs={
+                     "file_path": file_paths["valid"]["out_of_domain"]["json_annotated"]
+                 },
+             ),
+         ]
+ 
     def __split_generators(self, dl_manager: ds.DownloadManager):
         file_paths = dl_manager.download_and_extract(_URLS[self.config.name])
+ 
         return [
             ds.SplitGenerator(
-                 name=ds.Split.TRAIN,  # type: ignore
+                 name=ds.Split.TRAIN,
                 gen_kwargs={"file_path": file_paths["train"]},
             ),
             ds.SplitGenerator(
-                 name=ds.Split.VALIDATION,  # type: ignore
+                 name=ds.Split.VALIDATION,
                 gen_kwargs={"file_path": file_paths["valid"]},
             ),
         ]
@@ -547,6 +669,8 @@ class JGLUE(ds.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager: ds.DownloadManager):
         if self.config.name == "MARC-ja":
             return self.__split_generators_marc_ja(dl_manager)
+         elif self.config.name == "JCoLA":
+             return self.__split_generators_jcola(dl_manager)
         else:
             return self.__split_generators(dl_manager)
@@ -558,6 +682,53 @@ class JGLUE(ds.GeneratorBasedBuilder):
         for i, data_dict in enumerate(instances):
             yield i, data_dict
 
+     def __generate_examples_jcola(self, file_path: Optional[str] = None):
+         if file_path is None:
+             raise ValueError(f"Invalid argument for {self.config.name}")
+ 
+         def convert_label(json_dict):
+             label_int = json_dict["label"]
+             label_str = "unacceptable" if label_int == 0 else "acceptable"
+             json_dict["label"] = label_str
+             return json_dict
+ 
+         def convert_additional_info(json_dict):
+             json_dict["translation"] = json_dict.get("translation")
+             json_dict["gloss"] = json_dict.get("gloss")
+             return json_dict
+ 
+         def convert_phenomenon(json_dict):
+             argument_structure = json_dict.get("Arg. Str.")
+ 
+             def json_pop(key):
+                 return json_dict.pop(key) if argument_structure is not None else None
+ 
+             json_dict["linguistic_phenomenon"] = {
+                 "argument_structure": json_pop("Arg. Str."),
+                 "binding": json_pop("binding"),
+                 "control_raising": json_pop("control/raising"),
+                 "ellipsis": json_pop("ellipsis"),
+                 "filler_gap": json_pop("filler-gap"),
+                 "island_effects": json_pop("island effects"),
+                 "morphology": json_pop("morphology"),
+                 "nominal_structure": json_pop("nominal structure"),
+                 "negative_polarity_concord_items": json_pop("NPI/NCI"),
+                 "quantifier": json_pop("quantifier"),
+                 "verbal_agreement": json_pop("verbal agr."),
+                 "simple": json_pop("simple"),
+             }
+             return json_dict
+ 
+         with open(file_path, "r", encoding="utf-8") as rf:
+             for i, line in enumerate(rf):
+                 json_dict = json.loads(line)
+ 
+                 example = convert_label(json_dict)
+                 example = convert_additional_info(example)
+                 example = convert_phenomenon(example)
+ 
+                 yield i, example
+ 
     def __generate_examples_jsquad(self, file_path: Optional[str] = None):
         if file_path is None:
             raise ValueError(f"Invalid argument for {self.config.name}")
@@ -617,6 +788,9 @@ class JGLUE(ds.GeneratorBasedBuilder):
         if self.config.name == "MARC-ja":
             yield from self.__generate_examples_marc_ja(split_df)
 
+         elif self.config.name == "JCoLA":
+             yield from self.__generate_examples_jcola(file_path)
+ 
         elif self.config.name == "JSQuAD":
             yield from self.__generate_examples_jsquad(file_path)
 
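A detail worth noting about the split handling above: `dl_manager.download_and_extract` accepts an arbitrarily nested structure of URLs and returns local paths with the same shape, which is what lets `__split_generators_jcola` index into `file_paths["valid"]["out_of_domain"]["json_annotated"]`. A minimal sketch of that behavior (the example.com URLs are placeholders, not real endpoints):

```python
import datasets as ds

# download_and_extract preserves the nesting of its input, so a dict of
# URLs comes back as a dict of local cache paths with identical keys.
urls = {
    "train": {"in_domain": {"json": "https://example.com/train.json"}},
    "valid": {"in_domain": {"json": "https://example.com/valid.json"}},
}
dl_manager = ds.DownloadManager()
file_paths = dl_manager.download_and_extract(urls)
print(file_paths["train"]["in_domain"]["json"])  # local path in the HF cache
```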
README.md CHANGED
@@ -16,6 +16,7 @@ source_datasets:
 - original
 tags:
 - MARC
+ - CoLA
 - STS
 - NLI
 - SQuAD
@@ -72,7 +73,7 @@ Please feel free to open an [issue](https://github.com/shunk031/huggingface-data
 
 ### Dataset Summary
 
- From [the official README.md](https://github.com/yahoojapan/JGLUE#jglue-japanese-general-language-understanding-evaluation):
+ From [JGLUE's README.md](https://github.com/yahoojapan/JGLUE#jglue-japanese-general-language-understanding-evaluation):
 
 > JGLUE, Japanese General Language Understanding Evaluation, is built to measure the general NLU ability in Japanese. JGLUE has been constructed from scratch without translation. We hope that JGLUE will facilitate NLU research in Japanese.
 
@@ -80,7 +81,7 @@ From [the official README.md](https://github.com/yahoojapan/JGLUE#jglue-japanese
 
 ### Supported Tasks and Leaderboards
 
- From [the official README.md](https://github.com/yahoojapan/JGLUE#tasksdatasets):
+ From [JGLUE's README.md](https://github.com/yahoojapan/JGLUE#tasksdatasets):
 
 > JGLUE consists of the tasks of text classification, sentence pair classification, and QA. Each task consists of multiple datasets.
 
@@ -88,37 +89,43 @@ From [the official README.md](https://github.com/yahoojapan/JGLUE#tasksdatasets)
 
 ##### MARC-ja
 
- From [the official README.md](https://github.com/yahoojapan/JGLUE#marc-ja):
+ From [JGLUE's README.md](https://github.com/yahoojapan/JGLUE#marc-ja):
 
 > MARC-ja is a dataset of the text classification task. This dataset is based on the Japanese portion of [Multilingual Amazon Reviews Corpus (MARC)](https://docs.opendata.aws/amazon-reviews-ml/readme.html) ([Keung+, 2020](https://aclanthology.org/2020.emnlp-main.369/)).
 
+ ##### JCoLA
+ 
+ From [JCoLA's README.md](https://github.com/osekilab/JCoLA#jcola-japanese-corpus-of-linguistic-acceptability):
+ 
+ > JCoLA (Japanese Corpus of Linguistic Acceptability) is a novel dataset for targeted syntactic evaluations of language models in Japanese, which consists of 10,020 sentences with acceptability judgments by linguists. The sentences are manually extracted from linguistics journals, handbooks and textbooks. JCoLA is included in [JGLUE benchmark](https://github.com/yahoojapan/JGLUE) (Kurihara et al., 2022).
+ 
 ##### JSTS
 
- From [the official README.md](https://github.com/yahoojapan/JGLUE#jsts):
+ From [JGLUE's README.md](https://github.com/yahoojapan/JGLUE#jsts):
 
 > JSTS is a Japanese version of the STS (Semantic Textual Similarity) dataset. STS is a task to estimate the semantic similarity of a sentence pair. The sentences in JSTS and JNLI (described below) are extracted from the Japanese version of the MS COCO Caption Dataset, [the YJ Captions Dataset](https://github.com/yahoojapan/YJCaptions) ([Miyazaki and Shimizu, 2016](https://aclanthology.org/P16-1168/)).
 
 ##### JNLI
 
- From [the official README.md](https://github.com/yahoojapan/JGLUE#jnli):
+ From [JGLUE's README.md](https://github.com/yahoojapan/JGLUE#jnli):
 
 > JNLI is a Japanese version of the NLI (Natural Language Inference) dataset. NLI is a task to recognize the inference relation that a premise sentence has to a hypothesis sentence. The inference relations are entailment, contradiction, and neutral.
 
 ##### JSQuAD
 
- From [the official README.md](https://github.com/yahoojapan/JGLUE#jsquad):
+ From [JGLUE's README.md](https://github.com/yahoojapan/JGLUE#jsquad):
 
 > JSQuAD is a Japanese version of [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) ([Rajpurkar+, 2018](https://aclanthology.org/P18-2124/)), one of the datasets of reading comprehension. Each instance in the dataset consists of a question regarding a given context (Wikipedia article) and its answer. JSQuAD is based on SQuAD 1.1 (there are no unanswerable questions). We used [the Japanese Wikipedia dump](https://dumps.wikimedia.org/jawiki/) as of 20211101.
 
 ##### JCommonsenseQA
 
- From [the official README.md](https://github.com/yahoojapan/JGLUE#jcommonsenseqa):
+ From [JGLUE's README.md](https://github.com/yahoojapan/JGLUE#jcommonsenseqa):
 
 > JCommonsenseQA is a Japanese version of [CommonsenseQA](https://www.tau-nlp.org/commonsenseqa) ([Talmor+, 2019](https://aclanthology.org/N19-1421/)), which is a multiple-choice question answering dataset that requires commonsense reasoning ability. It is built using crowdsourcing with seeds extracted from the knowledge base [ConceptNet](https://conceptnet.io/).
 
 #### Leaderboard
 
- From [the official README.md](https://github.com/yahoojapan/JGLUE#leaderboard):
+ From [JGLUE's README.md](https://github.com/yahoojapan/JGLUE#leaderboard):
 
 > A leaderboard will be made public soon. The test set will be released at that time.
@@ -152,6 +159,63 @@ print(dataset)
 # })
 ```
 
+ #### JCoLA
+ 
+ ```python
+ from datasets import load_dataset
+ 
+ dataset = load_dataset("shunk031/JGLUE", name="JCoLA")
+ 
+ print(dataset)
+ # DatasetDict({
+ #     train: Dataset({
+ #         features: ['uid', 'source', 'label', 'diacritic', 'sentence', 'original', 'translation', 'gloss', 'simple', 'linguistic_phenomenon'],
+ #         num_rows: 6919
+ #     })
+ #     validation: Dataset({
+ #         features: ['uid', 'source', 'label', 'diacritic', 'sentence', 'original', 'translation', 'gloss', 'simple', 'linguistic_phenomenon'],
+ #         num_rows: 865
+ #     })
+ #     validation_out_of_domain: Dataset({
+ #         features: ['uid', 'source', 'label', 'diacritic', 'sentence', 'original', 'translation', 'gloss', 'simple', 'linguistic_phenomenon'],
+ #         num_rows: 685
+ #     })
+ #     validation_out_of_domain_annotated: Dataset({
+ #         features: ['uid', 'source', 'label', 'diacritic', 'sentence', 'original', 'translation', 'gloss', 'simple', 'linguistic_phenomenon'],
+ #         num_rows: 685
+ #     })
+ # })
+ ```
+ 
+ An example of the JCoLA dataset (validation - out of domain annotated) looks as follows:
+ 
+ ```json
+ {
+     "uid": 9109,
+     "source": "Asano_and_Ura_2010",
+     "label": 1,
+     "diacritic": "g",
+     "sentence": "太郎のゴミの捨て方について話した。",
+     "original": "太郎のゴミの捨て方",
+     "translation": "‘The way (for Taro) to throw out garbage’",
+     "gloss": true,
+     "linguistic_phenomenon": {
+         "argument_structure": true,
+         "binding": false,
+         "control_raising": false,
+         "ellipsis": false,
+         "filler_gap": false,
+         "island_effects": false,
+         "morphology": false,
+         "nominal_structure": false,
+         "negative_polarity_concord_items": false,
+         "quantifier": false,
+         "verbal_agreement": false,
+         "simple": false
+     }
+ }
+ ```
+ 
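Since `label` is a `ClassLabel` feature, the integer `1` in the example above corresponds to the name `acceptable`; the mapping can be recovered from the loaded dataset's features. A minimal sketch, assuming the dataset loads as shown earlier:

```python
from datasets import load_dataset

dataset = load_dataset("shunk031/JGLUE", name="JCoLA", split="validation")

example = dataset[0]
# Decode the ClassLabel integer back into its human-readable name.
label_name = dataset.features["label"].int2str(example["label"])
print(example["sentence"], label_name)
```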
 #### JSTS
 
 ```python
@@ -299,7 +363,7 @@ An example of the JCommonsenseQA looks as follows:
 
 ##### Explanation for `yjcaptions_id`
 
- From [the official README.md](https://github.com/yahoojapan/JGLUE#explanation-for-yjcaptions_id), there are the following two cases:
+ From [JGLUE's README.md](https://github.com/yahoojapan/JGLUE#explanation-for-yjcaptions_id), there are the following two cases:
 
 1. sentence pairs in one image: `(image id)-(sentence1 id)-(sentence2 id)`
     - e.g., 723-844-847
@@ -307,6 +371,32 @@ From [the official README.md](https://github.com/yahoojapan/JGLUE#explanation-fo
 2. sentence pairs in two images: `(image id of sentence1)_(image id of sentence2)-(sentence1 id)-(sentence2 id)`
     - e.g., 91337_217583-96105-91680
 
+ #### JCoLA
+ 
+ From [JCoLA's README.md](https://github.com/osekilab/JCoLA#data-description) and [JCoLA's paper](https://arxiv.org/abs/2309.12676):
+ 
+ - `uid`: unique id of the sentence
+ - `source`: author and the year of publication of the source article
+ - `label`: acceptability judgement label (0 for unacceptable, 1 for acceptable)
+ - `diacritic`: acceptability judgement as originally notated in the source article
+ - `sentence`: sentence (modified by the author if needed)
+ - `original`: original sentence as presented in the source article
+ - `translation`: English translation of the sentence as presented in the source article (if any)
+ - `gloss`: gloss of the sentence as presented in the source article (if any)
+ - `linguistic_phenomenon`
+     - `argument_structure`: acceptability judgements based on the order of arguments and case marking
+     - `binding`: acceptability judgements based on the binding of noun phrases
+     - `control_raising`: acceptability judgements based on predicates that are categorized as control or raising
+     - `ellipsis`: acceptability judgements based on the possibility of omitting elements in the sentences
+     - `filler_gap`: acceptability judgements based on the dependency between the moved element and the gap
+     - `island_effects`: acceptability judgements based on the restrictions on filler-gap dependencies such as wh-movements
+     - `morphology`: acceptability judgements based on the morphology
+     - `nominal_structure`: acceptability judgements based on the internal structure of noun phrases
+     - `negative_polarity_concord_items`: acceptability judgements based on the restrictions on where negative polarity/concord items (NPIs/NCIs) can appear
+     - `quantifier`: acceptability judgements based on the distribution of quantifiers such as floating quantifiers
+     - `verbal_agreement`: acceptability judgements based on the dependency between subjects and verbs
+     - `simple`: acceptability judgements that do not have marked syntactic structures
+ 
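The boolean sub-fields above make it straightforward to slice the annotated out-of-domain split by phenomenon. A minimal sketch, assuming the split names defined by this loader:

```python
from datasets import load_dataset

dataset = load_dataset(
    "shunk031/JGLUE", name="JCoLA", split="validation_out_of_domain_annotated"
)

# Keep only the examples annotated as involving island effects.
island_effects = dataset.filter(
    lambda example: example["linguistic_phenomenon"]["island_effects"]
)
print(island_effects.num_rows)
```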
 #### JNLI
 
 - `sentence_pair_id`: ID of the sentence pair
@@ -337,26 +427,30 @@ From [the official README.md](https://github.com/yahoojapan/JGLUE#explanation-fo
 
 ### Data Splits
 
- From [the official README.md](https://github.com/yahoojapan/JGLUE/blob/main/README.md#tasksdatasets):
+ From [JGLUE's README.md](https://github.com/yahoojapan/JGLUE/blob/main/README.md#tasksdatasets):
 
 > Only train/dev sets are available now, and the test set will be available after the leaderboard is made public.
 
+ From [JCoLA's paper](https://arxiv.org/abs/2309.12676):
+ 
+ > The in-domain data is split into training data (6,919 instances), development data (865 instances), and test data (865 instances). On the other hand, the out-of-domain data is only used for evaluation, and divided into development data (685 instances) and test data (686 instances).
+ 
 | Task | Dataset | Train | Dev | Test |
 |------------------------------|----------------|--------:|------:|------:|
 | Text Classification | MARC-ja | 187,528 | 5,654 | 5,639 |
- | | JCoLA† | - | - | - |
+ | | JCoLA | 6,919 | 865† / 685‡ | 865† / 685‡ |
 | Sentence Pair Classification | JSTS | 12,451 | 1,457 | 1,589 |
 | | JNLI | 20,073 | 2,434 | 2,508 |
 | Question Answering | JSQuAD | 62,859 | 4,442 | 4,420 |
 | | JCommonsenseQA | 8,939 | 1,119 | 1,118 |
 
- > †JCoLA will be added soon.
+ > JCoLA: † in domain. ‡ out of domain.
 
 ## Dataset Creation
 
 ### Curation Rationale
 
- From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
+ From [JGLUE's paper](https://aclanthology.org/2022.lrec-1.317/):
 
 > JGLUE is designed to cover a wide range of GLUE and SuperGLUE tasks and consists of three kinds of tasks: text classification, sentence pair classification, and question answering.
@@ -368,7 +462,7 @@ From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
 
 #### Who are the source language producers?
 
- - The source language producers are users of Amazon (MARC-ja), crowd-workers of Yahoo! Crowdsourcing (JSTS, JNLI and JCommonsenseQA), writers of the Japanese Wikipedia (JSQuAD).
+ - The source language producers are users of Amazon (MARC-ja), crowd-workers of [Yahoo! Crowdsourcing](https://crowdsourcing.yahoo.co.jp/) (JSTS, JNLI and JCommonsenseQA), writers of the Japanese Wikipedia (JSQuAD), and crowd-workers of [Lancers](https://www.lancers.jp/) (JCoLA).
 
 ### Annotations
@@ -376,7 +470,7 @@ From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
 
 ##### MARC-ja
 
- From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
+ From [JGLUE's paper](https://aclanthology.org/2022.lrec-1.317/):
 
 > As one of the text classification datasets, we build a dataset based on the Multilingual Amazon Reviews Corpus (MARC) (Keung et al., 2020). MARC is a multilingual corpus of product reviews with 5-level star ratings (1-5) on the Amazon shopping site. This corpus covers six languages, including English and Japanese. For JGLUE, we use the Japanese part of MARC and to make it easy for both humans and computers to judge a class label, we cast the text classification task as a binary classification task, where 1- and 2-star ratings are converted to “negative”, and 4 and 5 are converted to “positive”. We do not use reviews with a 3-star rating.
@@ -384,9 +478,19 @@ From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
 
 > We obtained 5,654 and 5,639 instances for the dev and test data, respectively, through the above procedure. For the training data, we extracted 187,528 instances directly from MARC without performing the cleaning procedure because of the large number of training instances. The statistics of MARC-ja are listed in Table 2. For the evaluation metric for MARC-ja, we use accuracy because it is a binary classification task of texts.
 
+ ##### JCoLA
+ 
+ From [JCoLA's paper](https://arxiv.org/abs/2309.12676):
+ 
+ > ### 3 JCoLA
+ > In this study, we introduce JCoLA (Japanese Corpus of Linguistic Acceptability), which will be the first large-scale acceptability judgment task dataset focusing on Japanese. JCoLA consists of sentences from textbooks and handbooks on Japanese syntax, as well as from journal articles on Japanese syntax that are published in JEAL (Journal of East Asian Linguistics), one of the prestigious journals in theoretical linguistics.
+ 
+ > #### 3.1 Data Collection
+ > Sentences in JCoLA were collected from prominent textbooks and handbooks focusing on Japanese syntax. In addition to the main text, example sentences included in the footnotes were also considered for collection. We also collected acceptability judgments from journal articles on Japanese syntax published in JEAL (Journal of East Asian Linguistics), one of the prestigious journals in theoretical linguistics. Specifically, we examined all the articles published in JEAL between 2006 and 2015 (133 papers in total), and extracted 2,252 acceptability judgments from 26 papers on Japanese syntax (Table 2). Acceptability judgments include sentences in appendices and footnotes, but not sentences presented for analyses of syntactic structures (e.g. sentences with brackets to show their syntactic structures). As a result, a total of 11,984 example sentences were collected. Using this as a basis, JCoLA was constructed through the methodology explained in the following sections.
+ 
 ##### JSTS and JNLI
 
- From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
+ From [JGLUE's paper](https://aclanthology.org/2022.lrec-1.317/):
 
 > For the sentence pair classification datasets, we construct a semantic textual similarity (STS) dataset, JSTS, and a natural language inference (NLI) dataset, JNLI.
@@ -411,7 +515,7 @@ From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
 
 ##### JSQuAD
 
- From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
+ From [JGLUE's paper](https://aclanthology.org/2022.lrec-1.317/):
 
 > As QA datasets, we build a Japanese version of SQuAD (Rajpurkar et al., 2016), one of the datasets of reading comprehension, and a Japanese version of CommonsenseQA, which is explained in the next section.
@@ -423,7 +527,7 @@ From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
 
 ##### JCommonsenseQA
 
- From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
+ From [JGLUE's paper](https://aclanthology.org/2022.lrec-1.317/):
 
 > ### Overview
 > JCommonsenseQA is a Japanese version of CommonsenseQA (Talmor et al., 2019), which consists of five choice QA to evaluate commonsense reasoning ability. Figure 3 shows examples of JCommonsenseQA. In the same way as CommonsenseQA, JCommonsenseQA is built using crowdsourcing with seeds extracted from the knowledge base ConceptNet (Speer et al., 2017). ConceptNet is a multilingual knowledge base that consists of triplets of two concepts and their relation. The triplets are directional and represented as (source concept, relation, target concept), for example (bullet train, AtLocation, station).
@@ -446,10 +550,14 @@ From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
 
 #### Who are the annotators?
 
- From [the official README.md](https://github.com/yahoojapan/JGLUE/blob/main/README.md#tasksdatasets):
+ From [JGLUE's README.md](https://github.com/yahoojapan/JGLUE/blob/main/README.md#tasksdatasets):
 
 > We use Yahoo! Crowdsourcing for all crowdsourcing tasks in constructing the datasets.
 
+ From [JCoLA's paper](https://arxiv.org/abs/2309.12676):
+ 
+ > As a reference for the upper limit of accuracy in JCoLA, human acceptability judgment experiments were conducted on Lancers with a subset of the JCoLA data.
+ 
 ### Personal and Sensitive Information
 
 [More Information Needed]
@@ -458,7 +566,7 @@ From [the official README.md](https://github.com/yahoojapan/JGLUE/blob/main/READ
 
 ### Social Impact of Dataset
 
- From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
+ From [JGLUE's paper](https://aclanthology.org/2022.lrec-1.317/):
 
 > We build a Japanese NLU benchmark, JGLUE, from scratch without translation to measure the general NLU ability in Japanese. We hope that JGLUE will facilitate NLU research in Japanese.
@@ -468,7 +576,9 @@ From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
 
 ### Other Known Limitations
 
- [More Information Needed]
+ From [JCoLA's paper](https://arxiv.org/abs/2309.12676):
+ 
+ > All the sentences included in JCoLA have been extracted from textbooks, handbooks and journal articles on theoretical syntax. Therefore, those sentences are guaranteed to be theoretically meaningful, making JCoLA a challenging dataset. However, the distribution of linguistic phenomena directly reflects that of the source literature and thus turns out to be extremely skewed. Indeed, as can be seen in Table 3, while the number of sentences exceeds 100 for most linguistic phenomena, there are several linguistic phenomena for which there are only about 10 sentences. In addition, since it is difficult to force language models to interpret sentences given specific contexts, those sentences whose unacceptability depends on contexts were inevitably removed from JCoLA. This removal process resulted in the deletion of unacceptable sentences from some linguistic phenomena (such as ellipsis), consequently skewing the balance between acceptable and unacceptable sentences (with a higher proportion of acceptable sentences).
 
 ## Additional Information
@@ -480,13 +590,17 @@ From [the original paper](https://aclanthology.org/2022.lrec-1.317/):
 
 - Keung, Phillip, et al. "The Multilingual Amazon Reviews Corpus." Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP). 2020.
 
+ #### JCoLA
+ 
+ - Someya, Sugimoto, and Oseki. "JCoLA: Japanese Corpus of Linguistic Acceptability." arXiv preprint arXiv:2309.12676 (2023).
+ 
 #### JSTS and JNLI
 
 - Miyazaki, Takashi, and Nobuyuki Shimizu. "Cross-lingual image caption generation." Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 2016.
 
 #### JSQuAD
 
- The authors curated the original data for JSQuAD from the Japanese wikipedia dump.
+ The JGLUE authors curated the original data for JSQuAD from the Japanese Wikipedia dump.
 
 #### JCommonsenseQA
@@ -494,38 +608,94 @@ In the same way as CommonsenseQA, JCommonsenseQA is built using crowdsourcing wi
 
 ### Licensing Information
 
+ #### JGLUE
+ 
+ From [JGLUE's README.md](https://github.com/yahoojapan/JGLUE#license):
+ 
 > This work is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License.
 
+ #### JCoLA
+ 
+ From [JCoLA's README.md](https://github.com/osekilab/JCoLA#license):
+ 
+ > The text in this corpus is excerpted from the published works, and copyright (where applicable) remains with the original authors or publishers. We expect that research use within Japan is legal under fair use, but make no guarantee of this.
+ 
 ### Citation Information
 
+ #### JGLUE
+ 
 ```bibtex
- @inproceedings{kurihara-etal-2022-jglue,
-     title = "{JGLUE}: {J}apanese General Language Understanding Evaluation",
-     author = "Kurihara, Kentaro and
-       Kawahara, Daisuke and
-       Shibata, Tomohide",
-     booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference",
-     month = jun,
-     year = "2022",
-     address = "Marseille, France",
-     publisher = "European Language Resources Association",
-     url = "https://aclanthology.org/2022.lrec-1.317",
-     pages = "2957--2966",
-     abstract = "To develop high-performance natural language understanding (NLU) models, it is necessary to have a benchmark to evaluate and analyze NLU ability from various perspectives. While the English NLU benchmark, GLUE, has been the forerunner, benchmarks are now being released for languages other than English, such as CLUE for Chinese and FLUE for French; but there is no such benchmark for Japanese. We build a Japanese NLU benchmark, JGLUE, from scratch without translation to measure the general NLU ability in Japanese. We hope that JGLUE will facilitate NLU research in Japanese.",
+ @inproceedings{kurihara-lrec-2022-jglue,
+     title={JGLUE: Japanese general language understanding evaluation},
+     author={Kurihara, Kentaro and Kawahara, Daisuke and Shibata, Tomohide},
+     booktitle={Proceedings of the Thirteenth Language Resources and Evaluation Conference},
+     pages={2957--2966},
+     year={2022},
+     url={https://aclanthology.org/2022.lrec-1.317/}
 }
 ```
 
 ```bibtex
- @InProceedings{Kurihara_nlp2022,
-     author = "栗原健太郎 and 河原大輔 and 柴田知秀",
-     title = "JGLUE: 日本語言語理解ベンチマーク",
-     booktitle = "言語処理学会第 28 回年次大会",
-     year = "2022",
-     url = "https://www.anlp.jp/proceedings/annual_meeting/2022/pdf_dir/E8-4.pdf"
-     note= "in Japanese"
+ @inproceedings{kurihara-nlp-2022-jglue,
+     title={JGLUE: 日本語言語理解ベンチマーク},
+     author={栗原健太郎 and 河原大輔 and 柴田知秀},
+     booktitle={言語処理学会第 28 回年次大会},
+     pages={2023--2028},
+     year={2022},
+     url={https://www.anlp.jp/proceedings/annual_meeting/2022/pdf_dir/E8-4.pdf},
+     note={in Japanese}
 }
 ```
 
+ #### MARC-ja
+ 
+ ```bibtex
+ @inproceedings{marc_reviews,
+     title={The Multilingual Amazon Reviews Corpus},
+     author={Keung, Phillip and Lu, Yichao and Szarvas, György and Smith, Noah A.},
+     booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing},
+     year={2020}
+ }
+ ```
+ 
+ #### JCoLA
+ 
+ ```bibtex
+ @article{someya-arxiv-2023-jcola,
+     title={JCoLA: Japanese Corpus of Linguistic Acceptability},
+     author={Taiga Someya and Yushi Sugimoto and Yohei Oseki},
+     year={2023},
+     eprint={2309.12676},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ ```
+ 
+ ```bibtex
+ @inproceedings{someya-nlp-2022-jcola,
+     title={日本語版 CoLA の構築},
+     author={染谷 大河 and 大関 洋平},
+     booktitle={言語処理学会第 28 回年次大会},
+     pages={1872--1877},
+     year={2022},
+     url={https://www.anlp.jp/proceedings/annual_meeting/2022/pdf_dir/E7-1.pdf},
+     note={in Japanese}
+ }
+ ```
+ 
+ #### JSTS and JNLI
+ 
+ ```bibtex
+ @inproceedings{miyazaki2016cross,
+     title={Cross-lingual image caption generation},
+     author={Miyazaki, Takashi and Shimizu, Nobuyuki},
+     booktitle={Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
+     pages={1780--1790},
+     year={2016}
+ }
+ ```
+ 
 ### Contributions
 
- Thanks to [Kentaro Kurihara](https://twitter.com/kkurihara_cs), [Daisuke Kawahara](https://twitter.com/daisukekawahar1), and [Tomohide Shibata](https://twitter.com/stomohide) for creating this dataset.
+ Thanks to [Kentaro Kurihara](https://twitter.com/kkurihara_cs), [Daisuke Kawahara](https://twitter.com/daisukekawahar1), and [Tomohide Shibata](https://twitter.com/stomohide) for creating the JGLUE dataset.
+ Thanks to [Taiga Someya](https://twitter.com/T0a8i0g9a) for creating the JCoLA dataset.
pyproject.toml CHANGED
@@ -21,7 +21,6 @@ pytest = "^7.2.1"
 
 [tool.ruff]
 target-version = "py38"
- # select = ["ALL"]
 ignore = [
     "E501", # line too long, handled by black
 ]
tests/JGLUE_test.py CHANGED
@@ -45,3 +45,19 @@ def test_load_marc_ja(
 
     assert dataset["train"].num_rows == expected_num_train
     assert dataset["validation"].num_rows == expected_num_valid
+ 
+ 
+ def test_load_jcola(
+     dataset_path: str,
+     dataset_name: str = "JCoLA",
+     expected_num_train: int = 6919,
+     expected_num_valid: int = 865,
+     expected_num_valid_ood: int = 685,
+ ):
+     dataset = ds.load_dataset(path=dataset_path, name=dataset_name)
+     assert dataset["train"].num_rows == expected_num_train
+     assert dataset["validation"].num_rows == expected_num_valid
+     assert dataset["validation_out_of_domain"].num_rows == expected_num_valid_ood
+     assert (
+         dataset["validation_out_of_domain_annotated"].num_rows == expected_num_valid_ood
+     )
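Assuming the repository's conftest.py supplies the `dataset_path` fixture used by these tests, the new JCoLA test can be run in isolation with `pytest tests/JGLUE_test.py -k jcola`.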