Datasets: ibm /
Languages: English
rajmohanc committed
Commit e805184
1 Parent(s): 6a03e86

Upload finqa.py

Files changed (1)
  1. finqa.py +104 -0
finqa.py ADDED
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os

import datasets

_CITATION = """\
@inproceedings{chen2021finqa,
  title={FinQA: A Dataset of Numerical Reasoning over Financial Data},
  author={Chen, Zhiyu and Chen, Wenhu and Smiley, Charese and Shah, Sameena and Borova, Iana and Langdon, Dylan and Moussa, Reema and Beane, Matt and Huang, Ting-Hao and Routledge, Bryan R and others},
  booktitle={Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing},
  pages={3697--3711},
  year={2021}
}
"""

_DESCRIPTION = """\
A large-scale dataset with 2.8k financial reports for 8k Q&A pairs to study numerical reasoning with structured and unstructured evidence.
"""

_HOMEPAGE = "https://finqasite.github.io"

_GIT_ARCHIVE_URL = "https://github.com/czyssrs/FinQA/archive/refs/heads/main.zip"


class FinQA(datasets.GeneratorBasedBuilder):
    """FinQA: A Large-scale Dataset for Numerical Reasoning over Financial Data."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "pre_text": datasets.features.Sequence(datasets.Value("string")),  # the text before the table
                "post_text": datasets.features.Sequence(datasets.Value("string")),  # the text after the table
                "table": datasets.features.Sequence(datasets.features.Sequence(datasets.Value("string"))),  # the table, row by row
                "question": datasets.Value("string"),  # the question
                "answer": datasets.Value("string"),  # the gold execution result
                "final_result": datasets.Value("string"),  # result of the last reasoning step; "answer" is empty ("") in some samples, so this field is kept as a fallback
                "program_re": datasets.Value("string"),  # the reasoning program
                "gold_inds": datasets.features.Sequence(datasets.Value("string")),  # the gold supporting facts
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        extracted_path = dl_manager.download_and_extract(_GIT_ARCHIVE_URL)

        train_file = os.path.join(extracted_path, "FinQA-main", "dataset", "train.json")
        dev_file = os.path.join(extracted_path, "FinQA-main", "dataset", "dev.json")
        test_file = os.path.join(extracted_path, "FinQA-main", "dataset", "test.json")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"dataset_filepath": train_file},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"dataset_filepath": dev_file},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"dataset_filepath": test_file},
            ),
        ]

    def _generate_examples(self, dataset_filepath):
        with open(dataset_filepath, encoding="utf-8") as f:
            lines = json.load(f)

        for idx, example in enumerate(lines):
            yield idx, {
                "id": example["id"],
                "pre_text": example["pre_text"],
                "post_text": example["post_text"],
                "table": example["table"],
                "question": example["qa"]["question"],
                "answer": example["qa"]["answer"],
                "final_result": str(example["qa"]["steps"][-1]["res"]),
                "program_re": str(example["qa"]["program"]),
                "gold_inds": list(example["qa"]["gold_inds"].values()),
            }
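
With this script in place, the dataset can be loaded through the standard datasets API. The sketch below is illustrative only: the Hub id "ibm/finqa" is an assumption inferred from the page header, and trust_remote_code=True is only needed on datasets versions that gate script-based loaders; the accessed fields are the ones declared in _info above.

from datasets import load_dataset

# Hub id "ibm/finqa" is an assumption based on the repository owner shown above.
finqa = load_dataset("ibm/finqa", trust_remote_code=True)

# _split_generators produces three splits: train, validation, test.
print(finqa)

# Each example carries the fields declared in _info().
sample = finqa["train"][0]
print(sample["question"])      # the question string
print(sample["program_re"])    # the reasoning program
print(sample["final_result"])  # result of the last reasoning step
print(sample["gold_inds"])     # gold supporting facts, exported as a list of strings

Note that final_result is filled from the last entry of qa["steps"], so it remains usable for the samples where answer is the empty string, and gold_inds is exported as the values of the original dict.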