parquet-converter committed on
Commit 05983fc · 1 Parent(s): ebf0fda

Update parquet files

README.md DELETED
@@ -1,27 +0,0 @@
- dataset_info:
-   features:
-   - name: id
-     dtype: string
-   - name: post_text
-     sequence: string
-   - name: pre_text
-     sequence: string
-   - name: question
-     dtype: string
-   - name: answers
-     dtype: string
-   - name: table
-     sequence:
-       sequence: string
-   splits:
-   - name: train
-     num_bytes: 26984130
-     num_examples: 6251
-   - name: validation
-     num_bytes: 3757103
-     num_examples: 883
-   - name: test
-     num_bytes: 4838430
-     num_examples: 1147
-   download_size: 21240722
-   dataset_size: 35579663
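
The deleted card metadata above still describes the converted data: six features and three splits. A minimal sketch of sanity-checking it after the conversion, assuming a hypothetical repo id (the actual Hub namespace is not shown in this commit):

```python
# Sketch only: "<namespace>/finqa" is a placeholder repo id.
from datasets import load_dataset

ds = load_dataset("<namespace>/finqa")
print(ds)                    # expect train=6251, validation=883, test=1147 examples
print(ds["train"].features)  # id, post_text, pre_text, question, answers, table
```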
default/finqa-test.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9dfaa2a0fc5f17c06558a59abcd37654ab5de4cbbe6d1fdc89ba563ded534908
+ size 1144390
default/finqa-train.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5252c3df1a17b8959c0040171932fffd8e1b59e01b509389ea559599f3a62156
+ size 12187769
default/finqa-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:320a13453c36b0b8f5734dd27d191e4884376c538947c0ebc2ac7737863f7b5d
+ size 764909
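
The three files added above are Git LFS pointers (spec version, content oid, byte size), not the parquet bytes themselves. A sketch of fetching one split directly, again with a placeholder repo id; `hf_hub_download` resolves the pointer to the real file:

```python
# Sketch: repo id is hypothetical; requires pandas with a parquet engine (pyarrow).
import pandas as pd
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="<namespace>/finqa",              # placeholder
    filename="default/finqa-train.parquet",
    repo_type="dataset",
)
df = pd.read_parquet(path)
print(len(df))  # 6251 rows per the dataset_info above
```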
finqa.py DELETED
@@ -1,87 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """FinQA is a large-scale dataset for numerical reasoning over the tables and text of financial reports."""
-
- import json
- import os
-
- import datasets
-
- _HOMEPAGE = "https://finqasite.github.io/index.html"
-
- _GIT_ARCHIVE_URL = "https://github.com/czyssrs/FinQA/archive/refs/heads/main.zip"
-
-
- class FinQA(datasets.GeneratorBasedBuilder):
-
-     VERSION = datasets.Version("1.0.0")
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "id": datasets.Value("string"),
-                 "post_text": datasets.features.Sequence(datasets.Value("string")),
-                 "pre_text": datasets.features.Sequence(datasets.Value("string")),
-                 "question": datasets.Value("string"),
-                 "answer": datasets.Value("string"),
-                 "gold_evidence": datasets.features.Sequence(datasets.Value("string")),
-                 "table": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),
-             }
-         )
-         return datasets.DatasetInfo(
-             features=features,
-             homepage=_HOMEPAGE,
-         )
-
-     def _split_generators(self, dl_manager):
-         # Download and unpack the upstream FinQA repository, then point each
-         # split at the corresponding JSON file in its `dataset` directory.
-         extracted_path = dl_manager.download_and_extract(_GIT_ARCHIVE_URL)
-         train_file = os.path.join(extracted_path, "FinQA-main", "dataset", "train.json")
-         dev_file = os.path.join(extracted_path, "FinQA-main", "dataset", "dev.json")
-         test_file = os.path.join(extracted_path, "FinQA-main", "dataset", "test.json")
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"main_filepath": train_file},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={"main_filepath": dev_file},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"main_filepath": test_file},
-             ),
-         ]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, main_filepath):
-         # The key is for legacy reasons (tfds) and only needs to be unique
-         # per example, so a running index suffices.
-         with open(main_filepath, encoding="utf-8") as f:
-             # each split file is a single JSON array of example objects
-             examples = json.load(f)
-             for idx, example in enumerate(examples):
-                 yield idx, {
-                     "id": example["id"],
-                     "post_text": example["post_text"],
-                     "pre_text": example["pre_text"],
-                     "question": example["qa"]["question"],
-                     "answer": example["qa"]["answer"],
-                     "table": example["table"],
-                     "gold_evidence": list(example["qa"]["gold_inds"].values()),
-                 }
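
The script deleted above pulled the raw JSON from the upstream GitHub repo and flattened each record's nested `qa` object. A standalone sketch of that same flattening, assuming a local clone of https://github.com/czyssrs/FinQA:

```python
# Sketch of what the deleted _generate_examples did, outside the datasets builder.
import json

with open("FinQA-main/dataset/train.json", encoding="utf-8") as f:
    examples = json.load(f)  # one JSON array per split

first = examples[0]
record = {
    "id": first["id"],
    "question": first["qa"]["question"],
    "answer": first["qa"]["answer"],
    # gold_inds maps evidence indices to the supporting table/text snippets
    "gold_evidence": list(first["qa"]["gold_inds"].values()),
}
print(record)
```

After this commit, that per-example work is baked into the parquet files, so consumers no longer need to execute the script.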