albertvillanova (HF staff) committed
Commit: b540105
Parent: 8f38abb

Convert dataset to Parquet (#3)


- Convert dataset to Parquet (c2a70ebbf4b71dd1faccb3c4a0f9d157dfb3b63f)
- Add cdsc-r data files (bd30a6f1a0683088bd784152e16e0ccc65dd03c6)
- Delete loading script (0a6e4cea3af0c40701848cf8ddcfb835364f168e)
- Delete legacy dataset_infos.json (969082d90cebe94628d86fd58facdfb489000d56)
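
Taken together, these commits make the dataset self-describing: the splits are served from the Parquet shards declared in the README metadata, and no Python loading script runs at load time. A minimal usage sketch follows; the bare repository id `cdsc` is an assumption here, so substitute this repo's actual Hub path.

```python
# Minimal sketch, assuming the Hub repository id is "cdsc" (hypothetical;
# use the actual owner/name of this repository).
from datasets import load_dataset

# Both configs now resolve to the Parquet files listed under `configs`
# in README.md, so no dataset script is downloaded or executed.
cdsc_e = load_dataset("cdsc", "cdsc-e")  # entailment labels
cdsc_r = load_dataset("cdsc", "cdsc-r")  # relatedness scores

print(cdsc_e)              # DatasetDict with train/test/validation splits
print(cdsc_e["train"][0])  # pair_ID, sentence_A, sentence_B, entailment_judgment
```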

README.md CHANGED
@@ -38,16 +38,16 @@ dataset_info:
           '2': ENTAILMENT
   splits:
   - name: train
-    num_bytes: 1381902
+    num_bytes: 1381894
     num_examples: 8000
   - name: test
-    num_bytes: 179400
+    num_bytes: 179392
     num_examples: 1000
   - name: validation
-    num_bytes: 174662
+    num_bytes: 174654
     num_examples: 1000
-  download_size: 376079
-  dataset_size: 1735964
+  download_size: 744169
+  dataset_size: 1735940
 - config_name: cdsc-r
   features:
   - name: pair_ID
@@ -60,16 +60,33 @@ dataset_info:
     dtype: float32
   splits:
   - name: train
-    num_bytes: 1349902
+    num_bytes: 1349894
     num_examples: 8000
   - name: test
-    num_bytes: 175400
+    num_bytes: 175392
     num_examples: 1000
   - name: validation
-    num_bytes: 170662
+    num_bytes: 170654
     num_examples: 1000
-  download_size: 381525
-  dataset_size: 1695964
+  download_size: 747648
+  dataset_size: 1695940
+configs:
+- config_name: cdsc-e
+  data_files:
+  - split: train
+    path: cdsc-e/train-*
+  - split: test
+    path: cdsc-e/test-*
+  - split: validation
+    path: cdsc-e/validation-*
+- config_name: cdsc-r
+  data_files:
+  - split: train
+    path: cdsc-r/train-*
+  - split: test
+    path: cdsc-r/test-*
+  - split: validation
+    path: cdsc-r/validation-*
 ---
 
 # Dataset Card for [Dataset Name]
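
The new `configs` block above maps each configuration to Parquet glob patterns, which is what the Hub's data-file resolution uses. Loading from a local clone is roughly equivalent to passing the same globs explicitly; a hedged sketch (paths relative to the repository root):

```python
# Sketch only: reproduces what the README `configs` entry for cdsc-e declares,
# using the generic "parquet" builder against a local clone of this repo.
from datasets import load_dataset

cdsc_e = load_dataset(
    "parquet",
    data_files={
        "train": "cdsc-e/train-*.parquet",
        "test": "cdsc-e/test-*.parquet",
        "validation": "cdsc-e/validation-*.parquet",
    },
)
print(cdsc_e["validation"].num_rows)  # expected: 1000, per the README metadata
```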
cdsc-e/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e018e1306afa13c0a011bc878b3f3bba337b10ed75ab476930726867632553e
+size 47115
cdsc-e/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c80d5a2eaf4697e91ffcad641bfdf20dbe1548a9069755d45e0b3ca64b49f9a
+size 648225
cdsc-e/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51c868664b7aaaf075fe29e1672c087e9a9af16b25cfe31c9c5917be52eeb942
+size 48829
cdsc-r/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:361bf5dad3262bfa9af66cd8aae95ddd07337a27a2defaffa456e32593416255
+size 46961
cdsc-r/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:24d77c97dafda245f87a9a63a423eee0e1c9abd06c3de2c02552e27b3cf47fce
+size 651529
cdsc-r/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dec23d23a46f1c22274c7276c15bf0dddfefb4198ae20d00b9272cb2b3fceb4b
+size 49158
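
Each of the six added files is a Git LFS pointer (spec version, SHA-256 oid, and byte size) rather than the Parquet bytes themselves; the actual shards are fetched via LFS. A sketch for inspecting one shard locally, assuming `git lfs pull` has materialized the real files:

```python
# Sketch: read one added shard directly with pyarrow (requires the real LFS
# object, not just the pointer file shown in the diff above).
import pyarrow.parquet as pq

table = pq.read_table("cdsc-e/train-00000-of-00001.parquet")
print(table.schema)    # pair_ID, sentence_A, sentence_B, entailment_judgment
print(table.num_rows)  # expected: 8000, matching the train split in README.md
```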
cdsc.py DELETED
@@ -1,143 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""cdsc-e & cdsc-r"""
-
-
-import csv
-import os
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{wroblewska2017polish,
-title={Polish evaluation dataset for compositional distributional semantics models},
-author={Wr{\'o}blewska, Alina and Krasnowska-Kiera{\'s}, Katarzyna},
-booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
-pages={784--792},
-year={2017}
-}
-"""
-
-_DESCRIPTION = """\
-Polish CDSCorpus consists of 10K Polish sentence pairs which are human-annotated for semantic relatedness and entailment. The dataset may be used for the evaluation of compositional distributional semantics models of Polish. The dataset was presented at ACL 2017. Please refer to the Wróblewska and Krasnowska-Kieraś (2017) for a detailed description of the resource.
-"""
-
-_HOMEPAGE = "http://zil.ipipan.waw.pl/Scwad/CDSCorpus"
-
-_LICENSE = "CC BY-NC-SA 4.0"
-
-_URLs = {
-    "cdsc-e": "https://klejbenchmark.com/static/data/klej_cdsc-e.zip",
-    "cdsc-r": "https://klejbenchmark.com/static/data/klej_cdsc-r.zip",
-}
-
-
-class Cdsc(datasets.GeneratorBasedBuilder):
-    """CDSCorpus"""
-
-    VERSION = datasets.Version("1.1.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="cdsc-e",
-            version=VERSION,
-            description="Polish CDSCorpus consists of 10K Polish sentence pairs which are human-annotated for semantic entailment.",
-        ),
-        datasets.BuilderConfig(
-            name="cdsc-r",
-            version=VERSION,
-            description="Polish CDSCorpus consists of 10K Polish sentence pairs which are human-annotated for semantic relatedness.",
-        ),
-    ]
-
-    def _info(self):
-        if self.config.name == "cdsc-e":
-            features = datasets.Features(
-                {
-                    "pair_ID": datasets.Value("int32"),
-                    "sentence_A": datasets.Value("string"),
-                    "sentence_B": datasets.Value("string"),
-                    "entailment_judgment": datasets.ClassLabel(
-                        names=[
-                            "NEUTRAL",
-                            "CONTRADICTION",
-                            "ENTAILMENT",
-                        ]
-                    ),
-                }
-            )
-        elif self.config.name == "cdsc-r":
-            features = datasets.Features(
-                {
-                    "pair_ID": datasets.Value("int32"),
-                    "sentence_A": datasets.Value("string"),
-                    "sentence_B": datasets.Value("string"),
-                    "relatedness_score": datasets.Value("float"),
-                }
-            )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        my_urls = _URLs[self.config.name]
-        data_dir = dl_manager.download_and_extract(my_urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "train.tsv"),
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={"filepath": os.path.join(data_dir, "test_features.tsv"), "split": "test"},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepath": os.path.join(data_dir, "dev.tsv"),
-                    "split": "dev",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        """Yields examples."""
-        with open(filepath, encoding="utf-8") as f:
-            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
-            for id_, row in enumerate(reader):
-                if self.config.name == "cdsc-e":
-                    yield id_, {
-                        "pair_ID": row["pair_ID"],
-                        "sentence_A": row["sentence_A"],
-                        "sentence_B": row["sentence_B"],
-                        "entailment_judgment": -1 if split == "test" else row["entailment_judgment"],
-                    }
-                elif self.config.name == "cdsc-r":
-                    yield id_, {
-                        "pair_ID": row["pair_ID"],
-                        "sentence_A": row["sentence_A"],
-                        "sentence_B": row["sentence_B"],
-                        "relatedness_score": "-1" if split == "test" else row["relatedness_score"],
-                    }
 
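The deleted script downloaded the KLEJ TSV archives and yielded rows config by config; the Parquet conversion simply materializes the same examples ahead of time. The following is not the exact procedure used for this commit, just a sketch of how such a conversion is typically done (the repository id `cdsc` and the ability to run the old script at the parent revision `8f38abb` are assumptions):

```python
# Hedged sketch: materialize the script-based dataset as Parquet shards named
# like the files added in this commit. Assumes a `datasets` version that still
# executes loading scripts and that "cdsc" resolves to this repository.
from datasets import load_dataset

for config in ("cdsc-e", "cdsc-r"):
    # Parent commit, before the loading script was removed.
    ds = load_dataset("cdsc", config, revision="8f38abb")
    for split, dset in ds.items():
        dset.to_parquet(f"{config}/{split}-00000-of-00001.parquet")
```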
dataset_infos.json DELETED
@@ -1 +0,0 @@
-{"cdsc-e": {"description": "Polish CDSCorpus consists of 10K Polish sentence pairs which are human-annotated for semantic relatedness and entailment. The dataset may be used for the evaluation of compositional distributional semantics models of Polish. The dataset was presented at ACL 2017. Please refer to the Wr\u00f3blewska and Krasnowska-Kiera\u015b (2017) for a detailed description of the resource.\n", "citation": "@inproceedings{wroblewska2017polish,\ntitle={Polish evaluation dataset for compositional distributional semantics models},\nauthor={Wr{'o}blewska, Alina and Krasnowska-Kiera{'s}, Katarzyna},\nbooktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\npages={784--792},\nyear={2017}\n}\n", "homepage": "http://zil.ipipan.waw.pl/Scwad/CDSCorpus", "license": "CC BY-NC-SA 4.0", "features": {"pair_ID": {"dtype": "int32", "id": null, "_type": "Value"}, "sentence_A": {"dtype": "string", "id": null, "_type": "Value"}, "sentence_B": {"dtype": "string", "id": null, "_type": "Value"}, "entailment_judgment": {"num_classes": 3, "names": ["NEUTRAL", "CONTRADICTION", "ENTAILMENT"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "cdsc", "config_name": "cdsc-e", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1381902, "num_examples": 8000, "dataset_name": "cdsc"}, "test": {"name": "test", "num_bytes": 179400, "num_examples": 1000, "dataset_name": "cdsc"}, "validation": {"name": "validation", "num_bytes": 174662, "num_examples": 1000, "dataset_name": "cdsc"}}, "download_checksums": {"https://klejbenchmark.com/static/data/klej_cdsc-e.zip": {"num_bytes": 376079, "checksum": "5314b3294ec36c49793e83ed1923c4551bda3fe15c6060eddbc57d3b15c19c05"}}, "download_size": 376079, "post_processing_size": null, "dataset_size": 1735964, "size_in_bytes": 2112043}, "cdsc-r": {"description": "Polish CDSCorpus consists of 10K Polish sentence pairs which are human-annotated for semantic relatedness and entailment. The dataset may be used for the evaluation of compositional distributional semantics models of Polish. The dataset was presented at ACL 2017. Please refer to the Wr\u00f3blewska and Krasnowska-Kiera\u015b (2017) for a detailed description of the resource.\n", "citation": "@inproceedings{wroblewska2017polish,\ntitle={Polish evaluation dataset for compositional distributional semantics models},\nauthor={Wr{'o}blewska, Alina and Krasnowska-Kiera{'s}, Katarzyna},\nbooktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\npages={784--792},\nyear={2017}\n}\n", "homepage": "http://zil.ipipan.waw.pl/Scwad/CDSCorpus", "license": "CC BY-NC-SA 4.0", "features": {"pair_ID": {"dtype": "int32", "id": null, "_type": "Value"}, "sentence_A": {"dtype": "string", "id": null, "_type": "Value"}, "sentence_B": {"dtype": "string", "id": null, "_type": "Value"}, "relatedness_score": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "cdsc", "config_name": "cdsc-r", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1349902, "num_examples": 8000, "dataset_name": "cdsc"}, "test": {"name": "test", "num_bytes": 175400, "num_examples": 1000, "dataset_name": "cdsc"}, "validation": {"name": "validation", "num_bytes": 170662, "num_examples": 1000, "dataset_name": "cdsc"}}, "download_checksums": {"https://klejbenchmark.com/static/data/klej_cdsc-r.zip": {"num_bytes": 381525, "checksum": "3e3358cc120bc0475e3944bd4491943a3b94d2fb4ed35773aa550ec99a1a70cc"}}, "download_size": 381525, "post_processing_size": null, "dataset_size": 1695964, "size_in_bytes": 2077489}}
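
The legacy JSON duplicated what now lives in the README's `dataset_info` YAML, so the same metadata remains queryable without this file. A sketch (the repository id `cdsc` is again an assumption):

```python
# Sketch: split sizes formerly stored in dataset_infos.json are now read from
# the README.md YAML header by the datasets library.
from datasets import load_dataset_builder

builder = load_dataset_builder("cdsc", "cdsc-r")
print(builder.info.splits["train"].num_examples)  # 8000
print(builder.info.dataset_size)                  # 1695940, per the updated README
```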
Please refer to the Wr\u00f3blewska and Krasnowska-Kiera\u015b (2017) for a detailed description of the resource.\n", "citation": "@inproceedings{wroblewska2017polish,\ntitle={Polish evaluation dataset for compositional distributional semantics models},\nauthor={Wr{'o}blewska, Alina and Krasnowska-Kiera{'s}, Katarzyna},\nbooktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},\npages={784--792},\nyear={2017}\n}\n", "homepage": "http://zil.ipipan.waw.pl/Scwad/CDSCorpus", "license": "CC BY-NC-SA 4.0", "features": {"pair_ID": {"dtype": "int32", "id": null, "_type": "Value"}, "sentence_A": {"dtype": "string", "id": null, "_type": "Value"}, "sentence_B": {"dtype": "string", "id": null, "_type": "Value"}, "relatedness_score": {"dtype": "float32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "cdsc", "config_name": "cdsc-r", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1349902, "num_examples": 8000, "dataset_name": "cdsc"}, "test": {"name": "test", "num_bytes": 175400, "num_examples": 1000, "dataset_name": "cdsc"}, "validation": {"name": "validation", "num_bytes": 170662, "num_examples": 1000, "dataset_name": "cdsc"}}, "download_checksums": {"https://klejbenchmark.com/static/data/klej_cdsc-r.zip": {"num_bytes": 381525, "checksum": "3e3358cc120bc0475e3944bd4491943a3b94d2fb4ed35773aa550ec99a1a70cc"}}, "download_size": 381525, "post_processing_size": null, "dataset_size": 1695964, "size_in_bytes": 2077489}}