Initialize (#1)

* add poetry files
* add scripts
* add .gitignore
* add files
* update README.md
* update script
* update
* update README.md
* add `push_to_hub.yaml`
* update
- .github/workflows/ci.yaml +58 -0
- .github/workflows/push_to_hub.yaml +25 -0
- .gitignore +176 -0
- JDocQA.py +273 -0
- README.md +281 -0
- poetry.lock +0 -0
- pyproject.toml +25 -0
- tests/JDocQA_test.py +35 -0
- tests/__init__.py +0 -0
.github/workflows/ci.yaml
ADDED
@@ -0,0 +1,58 @@
name: CI

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
    paths-ignore:
      - "README.md"

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.9", "3.10"]

    steps:
      - uses: actions/checkout@v3

      - name: Check for TODO and FIXME
        run: |
          grep -n --exclude-dir={.git,.github} -rE "FIXME" | while read -r line
          do
            file=$(echo $line | cut -d: -f1)
            lineno=$(echo $line | cut -d: -f2)
            echo "::warning file=$file,line=$lineno::${line}"
          done

          if grep --exclude-dir={.git,.github} -rE "TODO"; then
            exit 1
          fi

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        run: |
          pip install -U pip setuptools wheel poetry
          poetry install

      - name: Format
        run: |
          poetry run ruff format --check --diff .

      - name: Lint
        run: |
          poetry run ruff check --output-format=github .

      - name: Type check
        run: |
          poetry run mypy .

      - name: Run tests
        run: |
          poetry run pytest --color=yes -rf
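The `Check for TODO and FIXME` step above turns every `FIXME` hit into a GitHub Actions warning annotation (the `::warning` echo) and fails the job outright if any `TODO` remains in the tree. As a rough, hypothetical illustration of the same scan in Python (not a file in this commit; the names `SKIP_DIRS` and `scan` are invented for the sketch):

```python
# Hypothetical re-implementation of the CI grep step, for illustration only.
import pathlib
import sys

SKIP_DIRS = {".git", ".github"}  # mirrors --exclude-dir={.git,.github}


def scan(root: str = ".") -> int:
    found_todo = False
    for path in pathlib.Path(root).rglob("*"):
        if not path.is_file() or SKIP_DIRS & set(path.parts):
            continue
        try:
            text = path.read_text(encoding="utf-8")
        except (UnicodeDecodeError, OSError):
            continue  # skip binary or unreadable files, as grep effectively does
        for lineno, line in enumerate(text.splitlines(), start=1):
            if "FIXME" in line:
                # GitHub Actions warning annotation, like the `::warning` echo
                print(f"::warning file={path},line={lineno}::{line}")
            if "TODO" in line:
                found_todo = True
    return 1 if found_todo else 0  # non-zero exit fails the job, like `exit 1`


if __name__ == "__main__":
    sys.exit(scan())
```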
.github/workflows/push_to_hub.yaml
ADDED
@@ -0,0 +1,25 @@
name: Sync to Hugging Face Hub

on:
  workflow_run:
    workflows:
      - CI
    branches:
      - main
    types:
      - completed

jobs:
  push_to_hub:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Push to Hugging Face Hub
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          HF_USERNAME: ${{ secrets.HF_USERNAME }}
        run: |
          git fetch --unshallow
          git push --force https://${HF_USERNAME}:${HF_TOKEN}@huggingface.co/datasets/${HF_USERNAME}/JDocQA main
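The sync job runs only after the `CI` workflow completes on `main`, then mirrors the full git history onto the Hub dataset repository with a force push. A hedged alternative sketch, not what this workflow does, using the `huggingface_hub` client instead of raw git (assumes the same `HF_TOKEN`/`HF_USERNAME` secrets exported as environment variables):

```python
# Hypothetical alternative to the git force-push, using huggingface_hub.
import os

from huggingface_hub import HfApi

api = HfApi(token=os.environ["HF_TOKEN"])
api.upload_folder(
    folder_path=".",  # the checked-out repository
    repo_id=f"{os.environ['HF_USERNAME']}/JDocQA",
    repo_type="dataset",
    commit_message="Sync from GitHub",
)
```

Unlike the git push, this uploads a snapshot of the working tree rather than the commit history, which is presumably why the workflow above prefers `git push --force` after `git fetch --unshallow`.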
.gitignore
ADDED
@@ -0,0 +1,176 @@
# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python

### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml

# ruff
.ruff_cache/

# LSP config files
pyrightconfig.json

# End of https://www.toptal.com/developers/gitignore/api/python
JDocQA.py
ADDED
@@ -0,0 +1,273 @@
# Copyright 2024 Shunsuke Kitada and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script was generated from shunk031/cookiecutter-huggingface-datasets.
#
import json
import os
import re
from typing import List

import datasets as ds
from datasets.utils.logging import get_logger

logger = get_logger(__name__)

_CITATION = """\
@inproceedings{JDocQA_2024,
    title = "JDocQA: Japanese Document Question Answering Dataset for Generative Language Models",
    author = "Onami, Eri and
      Kurita, Shuhei and
      Miyanishi, Taiki and
      Watanabe, Taro",
    booktitle = "The 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation",
    month = may,
    year = "2024",
    address = "Torino, Italy",
    abstract = "Document question answering is a task of question answering on given documents such as reports, slides, pamphlets, and websites, and it is a truly demanding task as paper and electronic forms of documents are so common in our society. This is known as a quite challenging task because it requires not only text understanding but also understanding of figures and tables, and hence visual question answering (VQA) methods are often examined in addition to textual approaches. We introduce Japanese Document Question Answering (JDocQA), a large-scale document-based QA dataset, essentially requiring both visual and textual information to answer questions, which comprises 5,504 documents in PDF format and annotated 11,600 question-and-answer instances in Japanese. Each QA instance includes references to the document pages and bounding boxes for the answer clues. We incorporate multiple categories of questions and unanswerable questions from the document for realistic question-answering applications. We empirically evaluate the effectiveness of our dataset with text-based large language models (LLMs) and multimodal models. Incorporating unanswerable questions in finetuning may contribute to harnessing the so-called hallucination generation.",
}
"""

_DESCRIPTION = """\
Japanese Document Question Answering (JDocQA), a large-scale document-based QA dataset, essentially requiring both visual and textual information to answer questions, which comprises 5,504 documents in PDF format and annotated 11,600 question-and-answer instances in Japanese.
"""

_HOMEPAGE = "https://github.com/mizuumi/JDocQA"

_LICENSE = "JDocQA dataset annotations are distributed under CC BY-SA 4.0."

_URLS = {
    "annotations": {
        "train": "https://raw.githubusercontent.com/mizuumi/JDocQA/main/dataset/annotation_files/jdocqa_train_all.json",
        "validation": "https://github.com/mizuumi/JDocQA/raw/main/dataset/annotation_files/jdocqa_validation_all.json",
        "test": "https://github.com/mizuumi/JDocQA/raw/main/dataset/annotation_files/jdocqa_test_all.json",
    },
    "documents": "https://vlm-lab-fileshare.s3.ap-northeast-1.amazonaws.com/pdf_files.zip",
}


class JDocQADataset(ds.GeneratorBasedBuilder):
    """A class for loading JDocQA dataset."""

    VERSION = ds.Version("1.0.0")

    BUILDER_CONFIGS = [
        ds.BuilderConfig(
            version=VERSION,
            description=_DESCRIPTION,
        ),
    ]

    def _info(self) -> ds.DatasetInfo:
        features = ds.Features(
            {
                "answer": ds.Value("string"),
                "answer_type": ds.ClassLabel(
                    num_classes=4,
                    names=["yes/no", "factoid", "numerical", "open-ended"],
                ),
                "context": ds.Value("string"),
                "multiple_select_answer": ds.ClassLabel(
                    num_classes=4,
                    names=["A", "B", "C", "D"],
                ),
                "multiple_select_question": ds.Sequence(ds.Value("string")),
                "no_reason": ds.ClassLabel(
                    num_classes=4,
                    names=["0", "1", "2", "1,2"],
                ),
                "normalized_answer": ds.Value("string"),
                "original_answer": ds.Value("string"),
                "original_context": ds.Value("string"),
                "original_question": ds.Value("string"),
                "pdf_category": ds.ClassLabel(
                    num_classes=4,
                    names=["Document", "Kouhou", "Slide", "Website"],
                ),
                "pdf_name": ds.Value("string"),
                "question": ds.Value("string"),
                "question_number": ds.Sequence(ds.Value("uint64")),
                "question_page_number": ds.Value("string"),
                "reason_of_answer_bbox": ds.Sequence(ds.Value("string")),
                "text_from_ocr_pdf": ds.Value("string"),
                "text_from_pdf": ds.Value("string"),
                "type_of_image": ds.Sequence(
                    ds.ClassLabel(
                        num_classes=10,
                        names=[
                            "Null",
                            "Table",
                            "Bar chart",
                            "Line chart",
                            "Pie chart",
                            "Map",
                            "Other figures",
                            "Mixtured writing style from left to the right and from upside to the downside",
                            "Drawings",
                            "Others",
                        ],
                    )
                ),
                #
                # `pdf_filepath` is added to the original dataset for convenience
                "pdf_filepath": ds.Value("string"),
            }
        )
        return ds.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: ds.DownloadManager
    ) -> List[ds.SplitGenerator]:
        files = dl_manager.download_and_extract(_URLS)

        tng_ann_filepath = files["annotations"]["train"]  # type: ignore
        val_ann_filepath = files["annotations"]["validation"]  # type: ignore
        tst_ann_filepath = files["annotations"]["test"]  # type: ignore

        documents_dirpath = os.path.join(files["documents"], "pdf_files")  # type: ignore

        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,  # type: ignore
                gen_kwargs={
                    "annotation_path": tng_ann_filepath,
                    "documents_dir": documents_dirpath,
                },
            ),
            ds.SplitGenerator(
                name=ds.Split.VALIDATION,  # type: ignore
                gen_kwargs={
                    "annotation_path": val_ann_filepath,
                    "documents_dir": documents_dirpath,
                },
            ),
            ds.SplitGenerator(
                name=ds.Split.TEST,  # type: ignore
                gen_kwargs={
                    "annotation_path": tst_ann_filepath,
                    "documents_dir": documents_dirpath,
                },
            ),
        ]

    def _convert_answer_type(self, answer_type: str) -> str:
        if answer_type == "1":
            return "yes/no"
        elif answer_type == "2":
            return "factoid"
        elif answer_type == "3":
            return "numerical"
        elif answer_type == "4":
            return "open-ended"
        else:
            raise ValueError(f"Unknown answer type: {answer_type}")

    def _convert_multiple_select_question(
        self, multiple_select_question: str
    ) -> List[str]:
        _, qs = multiple_select_question.split("(A)")

        questions = []
        for sep in ("(B)", "(C)", "(D)"):
            q, qs = qs.split(sep)
            questions.append(q)
        questions.append(qs)

        assert (
            len(questions) == 4
        ), f"Before: {multiple_select_question}, After: {questions}"

        questions = [question.rstrip("、") for question in questions]
        return questions

    def _convert_question_number(self, question_number: str) -> List[int]:
        return [int(qn) for qn in question_number.split("-")]

    def _convert_reason_of_answer_bbox(self, reason_of_answer_bbox: str) -> List[str]:
        reason_of_answer_bboxes = [
            r for r in re.split(r"[.,、、]", reason_of_answer_bbox)
        ]
        check = [r.isdigit() if r != "" else r == "" for r in reason_of_answer_bboxes]
        assert all(check), reason_of_answer_bboxes
        return reason_of_answer_bboxes

    def _convert_type_of_image(self, type_of_image: str) -> List[str]:
        types_of_image = type_of_image.split(",")

        def convert_to_type_of_image(type_of_image: str) -> str:
            if type_of_image == "":
                return "Null"
            elif type_of_image == "1":
                return "Table"
            elif type_of_image == "2":
                return "Bar chart"
            elif type_of_image == "3":
                return "Line chart"
            elif type_of_image == "4":
                return "Pie chart"
            elif type_of_image == "5":
                return "Map"
            elif type_of_image == "6":
                return "Other figures"
            elif type_of_image == "7":
                return "Mixtured writing style from left to the right and from upside to the downside"
            elif type_of_image == "8":
                return "Drawings"
            elif type_of_image == "9":
                return "Others"
            else:
                raise ValueError(f"Unknown type of image: {type_of_image}")

        return [convert_to_type_of_image(t) for t in types_of_image]

    def _get_pdf_filepath(self, pdf_name: str, documents_dir: str) -> str:
        pdf_filepath = os.path.join(documents_dir, pdf_name)
        assert os.path.exists(pdf_filepath), f"File not found: {pdf_filepath}"
        return pdf_filepath

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, annotation_path: str, documents_dir: str):
        with open(annotation_path) as rf:
            for i, line in enumerate(rf):
                data = json.loads(line)

                data["answer_type"] = self._convert_answer_type(
                    answer_type=data["answer_type"]
                )
                data["multiple_select_question"] = (
                    self._convert_multiple_select_question(
                        multiple_select_question=data["multiple_select_question"]
                    )
                )
                data["question_number"] = self._convert_question_number(
                    data["question_number"]
                )
                data["reason_of_answer_bbox"] = self._convert_reason_of_answer_bbox(
                    data["reason_of_answer_bbox"]
                )
                data["type_of_image"] = self._convert_type_of_image(
                    type_of_image=data["type_of_image"]
                )
                data["pdf_filepath"] = self._get_pdf_filepath(
                    pdf_name=data["pdf_name"],
                    documents_dir=documents_dir,
                )

                yield i, data
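The helpers above turn the raw annotation strings into structured fields. A small standalone sketch of the two least obvious ones, using inputs shaped like the dataset's `multiple_select_question` and `question_number` strings (the sample strings are illustrative, not taken from the annotation files):

```python
# Standalone illustration of the conversion helpers above.

def convert_multiple_select_question(msq: str) -> list[str]:
    # "...(A)w(B)x(C)y(D)z" -> ["w", "x", "y", "z"], as in the builder.
    _, qs = msq.split("(A)")
    questions = []
    for sep in ("(B)", "(C)", "(D)"):
        q, qs = qs.split(sep)
        questions.append(q)
    questions.append(qs)
    # Trailing Japanese commas are stripped from each option.
    return [q.rstrip("、") for q in questions]


print(convert_multiple_select_question(
    "回答を選んでください。(A)はい、(B)いいえ、(C)わからない、(D)本文中に記載が見つけられませんでした"
))
# -> ['はい', 'いいえ', 'わからない', '本文中に記載が見つけられませんでした']


def convert_question_number(question_number: str) -> list[int]:
    # Hyphen-separated identifier, e.g. "4-656-1-4" -> [4, 656, 1, 4]
    return [int(qn) for qn in question_number.split("-")]


print(convert_question_number("4-656-1-4"))  # -> [4, 656, 1, 4]
```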
README.md
ADDED
@@ -0,0 +1,281 @@
---
annotations_creators:
- crowdsourced
language:
- ja
language_creators:
- found
license:
- cc-by-sa-4.0
multilinguality:
- monolingual
pretty_name: JDocQA
size_categories:
- 1K<n<10K
source_datasets:
- original
tags: []
task_categories:
- question-answering
task_ids:
- extractive-qa
- open-domain-qa
- closed-domain-qa
---

# Dataset Card for JDocQA

[![CI](https://github.com/shunk031/huggingface-datasets_JDocQA/actions/workflows/ci.yaml/badge.svg)](https://github.com/shunk031/huggingface-datasets_JDocQA/actions/workflows/ci.yaml)
[![Sync HF](https://github.com/shunk031/huggingface-datasets_JDocQA/actions/workflows/push_to_hub.yaml/badge.svg)](https://github.com/shunk031/huggingface-datasets_JDocQA/actions/workflows/push_to_hub.yaml)

## Table of Contents
- [Dataset Card for JDocQA](#dataset-card-for-jdocqa)
  - [Table of Contents](#table-of-contents)
  - [Dataset Description](#dataset-description)
    - [Dataset Summary](#dataset-summary)
    - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
    - [Languages](#languages)
  - [Dataset Structure](#dataset-structure)
    - [Data Instances](#data-instances)
    - [Data Fields](#data-fields)
    - [Data Splits](#data-splits)
  - [Dataset Creation](#dataset-creation)
    - [Curation Rationale](#curation-rationale)
    - [Source Data](#source-data)
      - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
      - [Who are the source language producers?](#who-are-the-source-language-producers)
    - [Annotations](#annotations)
      - [Annotation process](#annotation-process)
      - [Who are the annotators?](#who-are-the-annotators)
    - [Personal and Sensitive Information](#personal-and-sensitive-information)
  - [Considerations for Using the Data](#considerations-for-using-the-data)
    - [Social Impact of Dataset](#social-impact-of-dataset)
    - [Discussion of Biases](#discussion-of-biases)
    - [Other Known Limitations](#other-known-limitations)
  - [Additional Information](#additional-information)
    - [Dataset Curators](#dataset-curators)
    - [Licensing Information](#licensing-information)
    - [Citation Information](#citation-information)
    - [Contributions](#contributions)

## Dataset Description

- **Homepage:** https://github.com/mizuumi/JDocQA
- **Repository:** https://github.com/shunk031/huggingface-datasets_JDocQA
- **Paper (Preprint):** https://arxiv.org/abs/2403.19454

### Dataset Summary

From [JDocQA's paper](https://arxiv.org/abs/2403.19454):

> Japanese Document Question Answering (JDocQA), a large-scale document-based QA dataset, essentially requiring both visual and textual information to answer questions, which comprises 5,504 documents in PDF format and annotated 11,600 question-and-answer instances in Japanese.

### Supported Tasks and Leaderboards

From [JDocQA's paper](https://arxiv.org/abs/2403.19454):

> We consider generative question answering where a model generates a textual answer following the document context and textual question. For realistic applications of a wide range of user questions for documents, we prepare four categories of questions: **(1) yes/no**, **(2) factoid**, **(3) numerical**, and **(4) open-ended**.
>
> - In **yes/no questions**, answers are “yes” or “no.”
> - In **factoid questions**, answers are some facts, such as named entities, that typically appear in the given documents.
> - In **numerical questions**, answers are numeric values, often including some numerals (some units, e.g., km or Japanese numerals such as “8個 (objects)” and “8人 (persons)”). These numeric values are written in the documents or are calculated from other numbers in the documents.
> - In **open-ended questions**, free-form responses are required. For such questions, we aim to assess complex comprehension abilities, such as the ability to form opinions or brief explanations based on the provided contexts and questions.
>
> Figure 1 presents samples of these four categories of questions. All examples include diverse images and question types related to some Japanese documents collected. We also include unanswerable questions for each question category.

### Languages

The language data in JDocQA is in Japanese ([BCP-47 ja-JP](https://www.rfc-editor.org/info/bcp47)).

## Dataset Structure

### Data Instances

```python
import datasets as ds

dataset_path = "JDocQA.py"  # path to this repository's loading script
dataset = ds.load_dataset(path=dataset_path, trust_remote_code=True)

print(dataset)
# DatasetDict({
#     train: Dataset({
#         features: ['answer', 'answer_type', 'context', 'multiple_select_answer', 'multiple_select_question', 'no_reason', 'normalized_answer', 'original_answer', 'original_context', 'original_question', 'pdf_category', 'pdf_name', 'question', 'question_number', 'question_page_number', 'reason_of_answer_bbox', 'text_from_ocr_pdf', 'text_from_pdf', 'type_of_image', 'pdf_filepath'],
#         num_rows: 9290
#     })
#     validation: Dataset({
#         features: ['answer', 'answer_type', 'context', 'multiple_select_answer', 'multiple_select_question', 'no_reason', 'normalized_answer', 'original_answer', 'original_context', 'original_question', 'pdf_category', 'pdf_name', 'question', 'question_number', 'question_page_number', 'reason_of_answer_bbox', 'text_from_ocr_pdf', 'text_from_pdf', 'type_of_image', 'pdf_filepath'],
#         num_rows: 1134
#     })
#     test: Dataset({
#         features: ['answer', 'answer_type', 'context', 'multiple_select_answer', 'multiple_select_question', 'no_reason', 'normalized_answer', 'original_answer', 'original_context', 'original_question', 'pdf_category', 'pdf_name', 'question', 'question_number', 'question_page_number', 'reason_of_answer_bbox', 'text_from_ocr_pdf', 'text_from_pdf', 'type_of_image', 'pdf_filepath'],
#         num_rows: 1176
#     })
# })
```

An example of the JDocQA dataset (training set) looks as follows:

```json
{
    "answer": "本文中に記載がありません",
    "answer_type": 3,
    "context": "_II.調査内容(2.虹本マニュアルの策定)(3)基本マニュアルの記載項目前述の方針等を踏まえ、基本マニュアルの具体的な記載項目(目次だて)は以下のとおりとする。小項目・内容Iはじめにマニュアルの目的、立会義務_(消防法第13条第3項)安全対策の基本事項(SS立会い者とローリー乗務員による相互確認・相互協力の重要性)ローリー荷卸しの手順の基本的流れ ※詳細版のみIIローリー荷邊し時の作業内容1ローリー到着時(荷爺し前)1.ローリー停車位置の確認:計導2.納品書の相互確認3.アースの接続4.消火器の配置5.積荷の相互確認6.地下タンク和在庫及び和荷卸し数量の確認7・詳細版には、各項目ごとに、_-SS立会い者、ローリー乗務2荷邊し時(ホースの結合)03-.注油口の確認、ホースの結合を記載3.ベーパー回収ホース接続ee4荷卸し作業中の安全馬視特に重要な基本事3荷卸し終了時1.配管内、ホース内の残油の確認2.注油口の確認ハッチ内残油確認3.在庫確認4.5.後片付け6.ローリーの退出自事故・災害時の対処(初動対応)1コンタミ(混油)事故発見時(緊急処置)、連絡2オーバーフロー(漏油)事故発見時(緊急処置)、連絡3火災発見時(緊急処置)、初期消火IV通報・緊急連絡緊急時連絡先、通報内容参考チェックリスト例",
    "multiple_select_answer": 3,
    "multiple_select_question": ["はい", "いいえ", "わからない", "本文中に記載が見つけられませんでした"],
    "no_reason": 0,
    "normalized_answer": "本文中に記載がありません",
    "original_answer": "本文中に記載が見つけられませんでした",
    "original_context": "_II.調査内容(2.虹本マニュアルの策定)(3)基本マニュアルの記載項目前述の方針等を踏まえ、基本マニュアルの具体的な記載項目(目次だて)は以下のとおりとする。小項目・内容Iはじめにマニュアルの目的、立会義務_(消防法第13条第3項)安全対策の基本事項(SS立会い者とローリー乗務員による相互確認・相互協力の重要性)ローリー荷卸しの手順の基本的流れ ※詳細版のみIIローリー荷邊し時の作業内容1ローリー到着時(荷爺し前)1.ローリー停車位置の確認:計導2.納品書の相互確認3.アースの接続4.消火器の配置5.積荷の相互確認6.地下タンク和在庫及び和荷卸し数量の確認7・詳細版には、各項目ごとに、_-SS立会い者、ローリー乗務2荷邊し時(ホースの結合)03-.注油口の確認、ホースの結合を記載3.ベーパー回収ホース接続ee4荷卸し作業中の安全馬視特に重要な基本事3 荷卸し終了時1.配管内、ホース内の残油の確認2.注油口の確認ハッチ内残油確認3.在庫確認4.5.後片付け6.ローリーの退出自事故・災害時の対処(初動対応)1コンタミ(混油)事故発見時(緊急処置)、連絡2オーバーフロー(漏油)事故発見時(緊急処置)、連絡3火災発見時(緊急処置)、初期消火IV通報・緊急連絡緊急時連絡先、通報内容参考チェックリスト例",
    "original_question": "基本マニュアルの具体的な記載項目としている事故・災害時の対処の中で、オーバーフロー(漏油)事故が起こった場合は発見時にどのような処置が求められますか?",
    "pdf_category": 2,
    "pdf_name": "public_document00152.pdf",
    "question": "基本マニュアルの具体的な記載項目としている事故・災害時の対処の中で、オーバーフロー(漏油)事故が起こった場合は発見時にどのような処置が求められますか?\n解答は自由に記述してください。",
    "question_number": [4, 656, 1, 4],
    "question_page_number": "9",
    "reason_of_answer_bbox": [""],
    "text_from_ocr_pdf": "_II.調査内容(2.虹本マニュアルの策定)(3)基本マニュアルの記載項目前述の方針等を踏まえ、基本マニュアルの具体的な記載項目(目次だて)は以下のとおりとする。小項目・内容Iはじめにマニュアルの目的、立会義務_(消防法第13条第3項)安全対策の基本事項(SS立会い者とローリー乗務員による相互確認・相互協力の重要性)ローリー荷卸しの手順の基本的流れ ※詳細版のみIIローリー荷邊し時の作業内容1ローリー到着時(荷爺し前)1.ローリー停車位置の確認:計導2.納品書の相互確認3.アースの接続4.消火器の配置5.積荷の相互確認6.地下タンク和在庫及び和荷卸し数量の確認7・詳細版には、各項目ごとに、_-SS立会い者、ローリー乗務2荷邊し時(ホースの結合)03-.注油口の確認、ホースの結合を記載3.ベーパー回収ホース接続ee4荷卸し作業中の安全馬視特に重要な基本事3荷卸し終了時1.配管内、ホース内の残油の確認2.注油口の確認ハッチ内残油確認3.在庫確認4.5.後片付け6.ローリーの退出自事故・災害時の対処(初動対応)1コンタミ(混油)事故発見時(緊急処置)、連絡2オーバーフロー(漏油)事故発見時(緊急処置)、連絡3火災発見時(緊急処置)、初期消火IV通報・緊急連絡緊急時連絡先、通報内容参考チェックリスト例",
    "text_from_pdf": "",
    "type_of_image": [0],
    "pdf_filepath": "/home/shunk031/.cache/huggingface/datasets/downloads/extracted/f3481b9f65c75efec1e5398f76bd8347e64661573961b69423568699f1d7083a/pdf_files/public_document00152.pdf"
}
```

### Data Fields

From [JDocQA's README.md](https://github.com/mizuumi/JDocQA/blob/main/dataset/README.md) and [the paper](https://arxiv.org/abs/2403.19454):

- `answer`:
- `answer_type`: (1) Yes/No questions, (2) Factoid questions, (3) Numerical questions, (4) Open-ended questions.
- `context`: `original_context` with noise removed.
- `multiple_select_answer`:
- `multiple_select_question`:
- `no_reason`: Unanswerable question -> 0, answerable question -> 1.
- `normalized_answer`:
- `original_answer`: Annotated answers.
- `original_context`: Extracted texts from PDF.
- `original_question`: Annotated questions.
- `pdf_category`: Document category.
- `pdf_name`: PDF name.
- `question`: Question query for models.
- `question_number`:
- `question_page_number`: The page(s) on which annotators found the answers to the questions.
- `reason_of_answer_bbox`:
- `text_from_ocr_pdf`:
- `text_from_pdf`:
- `type_of_image`: (1) Table, (2) Bar chart, (3) Line chart, (4) Pie chart, (5) Map, (6) Other figures, (7) Mixtured writing style from left to the right and from upside to the downside, (8) Drawings, (9) Others.
- `pdf_filepath`: Full file path to the corresponding PDF file.

### Data Splits

From [JDocQA's paper](https://www.anlp.jp/proceedings/annual_meeting/2024/pdf_dir/C3-5.pdf), translated from the Japanese:

> The whole dataset is split so that the training, validation, and test sets contain 9,290, 1,134, and 1,176 question-answer pairs, respectively. A given PDF file always appears in exactly one split.

## Dataset Creation

### Curation Rationale

From [JDocQA's paper](https://arxiv.org/abs/2403.19454):

> To address the demand for a large-scale and fully annotated Japanese document question answering dataset, we introduce a JDocQA dataset by collecting Japanese documents in PDF styles from open-access sources including multiple formats of documents: slides, reports, websites and pamphlets and manually annotating question-answer pairs on them.

### Source Data

From [JDocQA's paper](https://arxiv.org/abs/2403.19454):

> We gather public documents, such as, municipality pamphlets and websites, that are created by Japanese governmental agencies or local governments.

#### Initial Data Collection and Normalization

From [JDocQA's paper](https://arxiv.org/abs/2403.19454):

> We manually collected PDF documents from open-access resources such as Japanese National Diet Library (NDL)’s digital collection, web archive projects (WARP) and websites of Japanese government ministries. We manually gathered documents such as reports, pamphlets or websites that are published by public or quasi-public sectors, such as local governments or public universities through WARP. We also gather Japanese ministry documents such as slides and reports from their websites following the government agencies’ policies. Those documents cover a wide range of topics, for instance, economic policies, education policies, labor issues, health and hygiene, agriculture, forestry, fisheries, culture and arts, history, related to governmental policy or policy guidelines, as well as the everyday affairs of local governments. These documents also include visual elements such as figures, tables, charts, pictures, or mandala charts, complex figures with a combination of texts and objects typically seen in the Japanese public administrative sector’s official document. We classify these documents into four categories, namely, pamphlet, slide, report, and website considering the form of the documents.

> We extracted texts from PDF documents with PyPDF2. We also notice that some PDF documents are probably created from paper scans, and we cannot extract embedded texts from such documents. Therefore, we extracted texts from the document page images by OCR (Optical Character Recognition) as an alternative source. After the text extraction or OCR, we removed mistakenly recognized symbols and emojis, or duplicated characters from texts when the same character continuously and repeatedly appeared more than five times.

#### Who are the source language producers?

From [JDocQA's paper](https://arxiv.org/abs/2403.19454):

> JDocQA dataset comprises 5,504 files and 11,600 question-and-answer pairs in Japanese.

### Annotations

#### Annotation process

From [JDocQA's paper](https://arxiv.org/abs/2403.19454):

> As documents include rich textual and visual elements (e.g., graphs, charts, maps, illustrations, and a mix of vertical and horizontal written text), we made question answer pairs that are related to both textual and visual information. We ask annotators to write up two to four question-answer annotations in each document. We also ask not to use any AI-tools such as OpenAI ChatGPT during the annotation process. Each question is accompanied with the supporting facts as marked in red in Figure 1 and Figure 3. We classify a subset of questions that have multiple supporting facts in multiple pages as multi-page questions. Multi-page questions are considerably difficult from their single-page counterparts. For unanswerable questions, we ask annotators to write questions that lack supporting facts in the documents, making them impossible to answer based on the given documents.

> We prepared three types of images for visual inputs for multimodal models. The first type of images are those of the whole page of the documents including the annotated question answering pairs. The second type of images are those cropped by bounding boxes on which annotators based their answers such as tables or figures of the pages. When multiple bounding boxes are annotated to a single question-answer pair, multiple cropped images are combined together into a single image here. The third type of images are blank (white) images that are used for ablation studies.

#### Who are the annotators?

From [JDocQA's paper](https://arxiv.org/abs/2403.19454):

> We ask 43 annotators in total for the question-answering pairs annotation on documents.

### Personal and Sensitive Information

[More Information Needed]

<!-- State whether the dataset uses identity categories and, if so, how the information is used. Describe where this information comes from (i.e. self-reporting, collecting from profiles, inferring, etc.). See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as a variables, particularly gender. State whether the data is linked to individuals and whether those individuals can be identified in the dataset, either directly or indirectly (i.e., in combination with other data).

State whether the dataset contains other data that might be considered sensitive (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history).

If efforts were made to anonymize the data, describe the anonymization process. -->

## Considerations for Using the Data

### Social Impact of Dataset

From [JDocQA's paper](https://arxiv.org/abs/2403.19454):

> We assume our datasets are useful for both research and development of generative language models and their applications for Japanese document question answering.

### Discussion of Biases

From [JDocQA's paper](https://arxiv.org/abs/2403.19454):

> We carefully avoid private documents and choose considerably public documents published by public or quasi-public sectors for the publicity of our dataset usage. All of the documents and webpages are publicly available online and we follow our institutional rules to gather them. We follow our institutional rules and also consult external advisors for data collection processes.

### Other Known Limitations

From [JDocQA's paper](https://arxiv.org/abs/2403.19454):

> We also consider our dataset with unanswerable questions can contribute to harnessing the hallucination problem of large language models. However, this doesn’t mean that the finetuned models with unanswerable questions do not perform hallucinations at all.

## Additional Information

### Dataset Curators

[More Information Needed]

<!-- List the people involved in collecting the dataset and their affiliation(s). If funding information is known, include it here. -->

### Licensing Information

From [JDocQA's README.md](https://github.com/mizuumi/JDocQA/blob/main/dataset/README.md):

> JDocQA dataset annotations are distributed under CC BY-SA 4.0.

### Citation Information

```bibtex
@inproceedings{JDocQA_2024,
    title = "JDocQA: Japanese Document Question Answering Dataset for Generative Language Models",
    author = "Onami, Eri and
      Kurita, Shuhei and
      Miyanishi, Taiki and
      Watanabe, Taro",
    booktitle = "The 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation",
    month = may,
    year = "2024",
    address = "Torino, Italy",
    abstract = "Document question answering is a task of question answering on given documents such as reports, slides, pamphlets, and websites, and it is a truly demanding task as paper and electronic forms of documents are so common in our society. This is known as a quite challenging task because it requires not only text understanding but also understanding of figures and tables, and hence visual question answering (VQA) methods are often examined in addition to textual approaches. We introduce Japanese Document Question Answering (JDocQA), a large-scale document-based QA dataset, essentially requiring both visual and textual information to answer questions, which comprises 5,504 documents in PDF format and annotated 11,600 question-and-answer instances in Japanese. Each QA instance includes references to the document pages and bounding boxes for the answer clues. We incorporate multiple categories of questions and unanswerable questions from the document for realistic question-answering applications. We empirically evaluate the effectiveness of our dataset with text-based large language models (LLMs) and multimodal models. Incorporating unanswerable questions in finetuning may contribute to harnessing the so-called hallucination generation.",
}
```

### Contributions

Thanks to [@mizuumi](https://github.com/mizuumi) for creating this dataset.
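Because every record carries a `pdf_filepath` into the downloaded archive, the source document can be inspected directly. A hedged usage sketch follows; it assumes the third-party `pypdf` package, which is not among this repository's dependencies, and notes that `question_page_number` is a string that may name several pages, so only its first entry is used:

```python
# Hypothetical usage sketch: open the source PDF behind one JDocQA example.
import datasets as ds
from pypdf import PdfReader  # assumed installed separately: pip install pypdf

dataset = ds.load_dataset(path="JDocQA.py", split="train", trust_remote_code=True)

example = dataset[0]
reader = PdfReader(example["pdf_filepath"])
print(example["question"])
print(f"{len(reader.pages)} pages in {example['pdf_name']}")

# Peek at the first annotated page (assumed 1-indexed in the annotation).
page_no = int(example["question_page_number"].split(",")[0])
print(reader.pages[page_no - 1].extract_text()[:200])
```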
poetry.lock
ADDED
The diff for this file is too large to render.
pyproject.toml
ADDED
@@ -0,0 +1,25 @@
[tool.poetry]
name = "huggingface-datasets-jdocqa"
version = "0.1.0"
description = ""
authors = ["Shunsuke KITADA <shunsuke.kitada.0831@gmail.com>"]
readme = "README.md"
package-mode = false

[tool.poetry.dependencies]
python = "^3.9"
datasets = { extras = ["vision"], version = ">=1.0.0" }

[tool.poetry.group.dev.dependencies]
ruff = ">=0.1.5"
mypy = ">=1.0.0"
pytest = ">=6.0.0"

[tool.mypy]
python_version = "3.9"
ignore_missing_imports = true

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
tests/JDocQA_test.py
ADDED
@@ -0,0 +1,35 @@
import os

import datasets as ds
import pytest


@pytest.fixture
def dataset_name() -> str:
    return "JDocQA"


@pytest.fixture
def dataset_path(dataset_name: str) -> str:
    return f"{dataset_name}.py"


@pytest.mark.skipif(
    condition=bool(os.environ.get("CI", False)),
    reason=(
        "Because this loading script downloads a large dataset, "
        "we will skip running it on CI."
    ),
)
def test_load_dataset(
    dataset_path: str,
    expected_num_train: int = 9290,
    expected_num_validation: int = 1134,
    expected_num_test: int = 1176,
):
    dataset = ds.load_dataset(path=dataset_path, trust_remote_code=True)
    assert isinstance(dataset, ds.DatasetDict)

    assert dataset["train"].num_rows == expected_num_train
    assert dataset["validation"].num_rows == expected_num_validation
    assert dataset["test"].num_rows == expected_num_test
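One subtlety in the `skipif` condition above: `bool(os.environ.get("CI", False))` tests string truthiness, not the string's meaning, so any non-empty value of `CI`, including `"false"`, skips the test. A standalone illustration (not part of the test suite):

```python
# String truthiness of the CI environment variable, as used in the skipif above.
import os

for value in ("true", "false", ""):
    os.environ["CI"] = value
    print(repr(value), "->", bool(os.environ.get("CI", False)))
# 'true'  -> True
# 'false' -> True   (any non-empty string is truthy, so the test is still skipped)
# ''      -> False

os.environ.pop("CI", None)
print("unset ->", bool(os.environ.get("CI", False)))  # unset -> False
```

GitHub Actions always sets `CI=true`, so the intended behavior holds in this repository's CI.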
tests/__init__.py
ADDED
File without changes