Convert dataset to Parquet #1
by KennethEnevoldsen - opened
- README.md +38 -0
- ctkfacts_nli.py +0 -92
- data/test-00000-of-00001.parquet +3 -0
- data/test.jsonl +0 -0
- data/train-00000-of-00001.parquet +3 -0
- data/train.jsonl +0 -0
- data/validation-00000-of-00001.parquet +3 -0
- data/validation.jsonl +0 -0
- experiments.ipynb +0 -211
- generate_readme.py +0 -0
- test.py +0 -3
README.md
CHANGED
@@ -1,3 +1,41 @@
+---
+dataset_info:
+  features:
+  - name: id
+    dtype: int32
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': REFUTES
+          '1': NOT ENOUGH INFO
+          '2': SUPPORTS
+  - name: evidence
+    dtype: string
+  - name: claim
+    dtype: string
+  splits:
+  - name: train
+    num_bytes: 2109934
+    num_examples: 3626
+  - name: validation
+    num_bytes: 242251
+    num_examples: 482
+  - name: test
+    num_bytes: 285028
+    num_examples: 558
+  download_size: 963565
+  dataset_size: 2637213
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: validation
+    path: data/validation-*
+  - split: test
+    path: data/test-*
+---
 # CTKFacts dataset for Natural Language Inference
 
 Czech Natural Language Inference dataset of ~3K *evidence*-*claim* pairs labelled with SUPPORTS, REFUTES or NOT ENOUGH INFO veracity labels. Extracted from a round of fact-checking experiments concluded and described within the CsFEVER and [CTKFacts: Czech Datasets for Fact Verification](https://arxiv.org/abs/2201.11115) paper currently being revised for publication in LREV journal.
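With the `configs` block above, `load_dataset` resolves the Parquet shards directly, so the custom loading script deleted below is no longer needed. A minimal loading sketch, assuming the repo id `heruberuto/ctkfacts_nli` seen in the deleted notebook:

```python
# Minimal loading sketch; "heruberuto/ctkfacts_nli" is the repo id used in the
# deleted experiments notebook and may differ from the final hub path.
from datasets import load_dataset

ds = load_dataset("heruberuto/ctkfacts_nli")
print(ds["train"].features["label"].names)  # ['REFUTES', 'NOT ENOUGH INFO', 'SUPPORTS']
```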
ctkfacts_nli.py
DELETED
@@ -1,92 +0,0 @@
-import os
-import pathlib
-from typing import overload
-import datasets
-import json
-
-from datasets.info import DatasetInfo
-
-_VERSION = "0.0.6"
-
-_URL= "data/"
-
-_URLS = {
-    "train": _URL + "train.jsonl",
-    "validation": _URL + "validation.jsonl",
-    "test": _URL + "test.jsonl"
-}
-
-_DESCRIPTION = """\
-CtkFactsNLI is a NLI version of the Czech CTKFacts dataset
-"""
-
-_CITATION = """\
-@article{DBLP:journals/corr/abs-2201-11115,
-  author    = {Jan Drchal and
-               Herbert Ullrich and
-               Martin R{\'{y}}par and
-               Hana Vincourov{\'{a}} and
-               V{\'{a}}clav Moravec},
-  title     = {CsFEVER and CTKFacts: Czech Datasets for Fact Verification},
-  journal   = {CoRR},
-  volume    = {abs/2201.11115},
-  year      = {2022},
-  url       = {https://arxiv.org/abs/2201.11115},
-  eprinttype = {arXiv},
-  eprint    = {2201.11115},
-  timestamp = {Tue, 01 Feb 2022 14:59:01 +0100},
-  biburl    = {https://dblp.org/rec/journals/corr/abs-2201-11115.bib},
-  bibsource = {dblp computer science bibliography, https://dblp.org}
-}
-"""
-
-datasets.utils.version.Version
-class CtkfactsNli(datasets.GeneratorBasedBuilder):
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("int32"),
-                    "label": datasets.ClassLabel(names=["REFUTES", "NOT ENOUGH INFO", "SUPPORTS"]),
-                    # datasets.features.Sequence({"text": datasets.Value("string"),"answer_start": datasets.Value("int32"),})
-                    "evidence": datasets.Value("string"),
-                    "claim": datasets.Value("string"),
-                }
-            ),
-            # No default supervised_keys (as we have to pass both question
-            # and context as input).
-            supervised_keys=None,
-            version=_VERSION,
-            homepage="https://fcheck.fel.cvut.cz/dataset/",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager: datasets.DownloadManager):
-        downloaded_files = dl_manager.download_and_extract(_URLS)
-
-        return [
-            datasets.SplitGenerator(datasets.Split.TRAIN, {
-                "filepath": downloaded_files["train"]
-            }),
-            datasets.SplitGenerator(datasets.Split.VALIDATION, {
-                "filepath": downloaded_files["validation"]
-            }),
-            datasets.SplitGenerator(datasets.Split.TEST, {
-                "filepath": downloaded_files["test"]
-            }),
-        ]
-
-    def _generate_examples(self, filepath):
-        """This function returns the examples in the raw (text) form."""
-        key = 0
-        with open(filepath, encoding="utf-8") as f:
-            for line in f:
-                datapoint = json.loads(line)
-                yield key, {
-                    "id": datapoint["id"],
-                    "evidence": " ".join(datapoint["evidence"]),
-                    "claim": datapoint["claim"],
-                    "label": datapoint["label"]
-                }
-                key += 1
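The deleted script documents the row schema the Parquet shards must reproduce, including the `" ".join` that flattens each evidence list to a single string. A hypothetical sketch of the equivalent JSONL-to-Parquet conversion (not necessarily how the shards in this PR were produced):

```python
# Hypothetical conversion sketch: replay the deleted script's row mapping and
# write one Parquet shard per split, matching the file names added in this PR.
import json

import pandas as pd  # to_parquet requires pyarrow (or fastparquet)

for split in ("train", "validation", "test"):
    rows = []
    with open(f"data/{split}.jsonl", encoding="utf-8") as f:
        for line in f:
            d = json.loads(line)
            rows.append({
                "id": d["id"],
                "label": d["label"],
                "evidence": " ".join(d["evidence"]),  # mirrors _generate_examples
                "claim": d["claim"],
            })
    pd.DataFrame(rows).to_parquet(f"data/{split}-00000-of-00001.parquet", index=False)
```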
data/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0fdc92be778be7947b0b881b0b4537f75827be931c69d5c72d69ab64ea7a31b
+size 59590
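The Parquet shards are committed as Git LFS pointers; the `oid` line records the SHA-256 of the actual payload. A small sketch for checking a downloaded shard against the pointer above:

```python
# Verify the test shard against the sha256 recorded in its LFS pointer.
import hashlib

with open("data/test-00000-of-00001.parquet", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

assert digest == "a0fdc92be778be7947b0b881b0b4537f75827be931c69d5c72d69ab64ea7a31b"
```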
data/test.jsonl
DELETED
The diff for this file is too large to render. See raw diff.
data/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35013f7fd46883f26090d8282e58e19c15590ca11e1944d5420c6e55387ccba9
+size 848923
data/train.jsonl
DELETED
The diff for this file is too large to render. See raw diff.
data/validation-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:843641d5165b5d6d5d7afb2870262c5ccf7aebb85ef7d2a5faa6a8a783b2b902
+size 55052
data/validation.jsonl
DELETED
The diff for this file is too large to render. See raw diff.
experiments.ipynb
DELETED
@@ -1,211 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# 🤗 Experiments over hf dataset"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 1,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import datasets\n",
-    "from datasets import load_dataset"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 2,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "Using custom data configuration default\n",
-      "Reusing dataset ctkfacts_nli (/Users/bertik/.cache/huggingface/datasets/ctkfacts_nli/default/0.0.0/5dcd805dfbd9694ead18f5cf4da8d902a1a1ca53685a5ebabd33f3d314dd597d)\n",
-      "100%|██████████| 3/3 [00:00<00:00, 409.28it/s]\n"
-     ]
-    }
-   ],
-   "source": [
-    "d=load_dataset(\"../ctkfacts_nli.py\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "DatasetDict({\n",
-       "    train: Dataset({\n",
-       "        features: ['id', 'label', 'evidence', 'claim'],\n",
-       "        num_rows: 2903\n",
-       "    })\n",
-       "    validation: Dataset({\n",
-       "        features: ['id', 'label', 'evidence', 'claim'],\n",
-       "        num_rows: 377\n",
-       "    })\n",
-       "    test: Dataset({\n",
-       "        features: ['id', 'label', 'evidence', 'claim'],\n",
-       "        num_rows: 431\n",
-       "    })\n",
-       "})"
-      ]
-     },
-     "execution_count": 10,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "d"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\n",
-      " _|    _|  _|    _|    _|_|_|    _|_|_|  _|_|_|  _|      _|    _|_|_|      _|_|_|_|    _|_|      _|_|_|  _|_|_|_|\n",
-      " _|    _|  _|    _|  _|        _|          _|    _|_|    _|  _|            _|        _|    _|  _|        _|\n",
-      " _|_|_|_|  _|    _|  _|  _|_|  _|  _|_|    _|    _|  _|  _|  _|  _|_|      _|_|_|    _|_|_|_|  _|        _|_|_|\n",
-      " _|    _|  _|    _|  _|    _|  _|    _|    _|    _|    _|_|  _|    _|      _|        _|    _|  _|        _|\n",
-      " _|    _|    _|_|      _|_|_|    _|_|_|  _|_|_|  _|      _|    _|_|_|      _|        _|    _|    _|_|_|  _|_|_|_|\n",
-      "\n",
-      "        \n",
-      "Username: ^C\n",
-      "Traceback (most recent call last):\n",
-      "  File \"/opt/homebrew/bin/huggingface-cli\", line 8, in <module>\n",
-      "    sys.exit(main())\n",
-      "  File \"/opt/homebrew/lib/python3.9/site-packages/huggingface_hub/commands/huggingface_cli.py\", line 41, in main\n",
-      "    service.run()\n",
-      "  File \"/opt/homebrew/lib/python3.9/site-packages/huggingface_hub/commands/user.py\", line 169, in run\n",
-      "    username = input(\"Username: \")\n",
-      "KeyboardInterrupt\n"
-     ]
-    }
-   ],
-   "source": [
-    "!huggingface-cli login"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "Downloading: 100%|██████████| 2.55k/2.55k [00:00<00:00, 1.33MB/s]\n",
-      "Using custom data configuration default\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Downloading and preparing dataset ctkfacts_nli/default to /Users/bertik/.cache/huggingface/datasets/heruberuto___ctkfacts_nli/default/0.0.0/5dcd805dfbd9694ead18f5cf4da8d902a1a1ca53685a5ebabd33f3d314dd597d...\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "Downloading: 100%|██████████| 2.14M/2.14M [00:00<00:00, 2.86MB/s]\n",
-      "Downloading: 100%|██████████| 247k/247k [00:00<00:00, 386kB/s]\n",
-      "Downloading: 100%|██████████| 287k/287k [00:00<00:00, 450kB/s]\n",
-      "100%|██████████| 3/3 [00:04<00:00, 1.66s/it]\n",
-      "100%|██████████| 3/3 [00:00<00:00, 976.56it/s]\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Dataset ctkfacts_nli downloaded and prepared to /Users/bertik/.cache/huggingface/datasets/heruberuto___ctkfacts_nli/default/0.0.0/5dcd805dfbd9694ead18f5cf4da8d902a1a1ca53685a5ebabd33f3d314dd597d. Subsequent calls will reuse this data.\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "100%|██████████| 3/3 [00:00<00:00, 811.96it/s]\n"
-     ]
-    },
-    {
-     "data": {
-      "text/plain": [
-       "DatasetDict({\n",
-       "    train: Dataset({\n",
-       "        features: ['id', 'label', 'evidence', 'claim'],\n",
-       "        num_rows: 2903\n",
-       "    })\n",
-       "    validation: Dataset({\n",
-       "        features: ['id', 'label', 'evidence', 'claim'],\n",
-       "        num_rows: 377\n",
-       "    })\n",
-       "    test: Dataset({\n",
-       "        features: ['id', 'label', 'evidence', 'claim'],\n",
-       "        num_rows: 431\n",
-       "    })\n",
-       "})"
-      ]
-     },
-     "execution_count": 4,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "load_dataset(\"heruberuto/ctkfacts_nli\", use_auth_token=True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "interpreter": {
-   "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e"
-  },
-  "kernelspec": {
-   "display_name": "Python 3.9.7 64-bit",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.7"
-  },
-  "orig_nbformat": 4
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
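The deleted notebook had to run the loading script (and `huggingface-cli login`) to materialize the splits. With the committed shards the data is also readable without `datasets` at all; a sketch, assuming pandas with pyarrow installed:

```python
# Read one shard directly; expected columns and row count come from the new
# README metadata (train: 3626 examples).
import pandas as pd

df = pd.read_parquet("data/train-00000-of-00001.parquet")
print(df.columns.tolist())  # ['id', 'label', 'evidence', 'claim']
print(len(df))              # 3626
```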
generate_readme.py
DELETED
File without changes
test.py
DELETED
@@ -1,3 +0,0 @@
-import datasets
-
-datasets.load_dataset("ctkfacts_nli.py")
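The deleted `test.py` smoke-tested the loading script. A hypothetical replacement that exercises the committed Parquet files instead:

```python
# Hypothetical smoke test for the Parquet layout, replacing the deleted test.py.
from datasets import load_dataset

ds = load_dataset("parquet", data_files={
    "train": "data/train-00000-of-00001.parquet",
    "validation": "data/validation-00000-of-00001.parquet",
    "test": "data/test-00000-of-00001.parquet",
})
print(ds)
```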