---
# Hugging Face dataset card metadata for ScreenQA.
# Reconstructed: indentation was flattened and the `---` frontmatter
# delimiters were missing; nesting follows the standard HF `dataset_info`
# schema (features / splits / configs).
license: cc-by-4.0
dataset_info:
  features:
    - name: screen_id
      dtype: string
    - name: question
      dtype: string
    # Each example may carry several ground-truth annotations.
    - name: ground_truth
      list:
        - name: full_answer
          dtype: string
        # UI elements supporting the answer; bounds is a numeric box
        # (presumably pixel coordinates — TODO confirm against the data).
        - name: ui_elements
          list:
            - name: bounds
              sequence: float64
            - name: text
              dtype: string
            - name: vh_index
              dtype: int64
    - name: file_name
      dtype: string
    - name: image
      dtype: image
  splits:
    - name: train
      num_bytes: 8162486352.98
      num_examples: 68980
    - name: validation
      num_bytes: 1057181449.778
      num_examples: 8618
    - name: test
      num_bytes: 951399950.724
      num_examples: 8427
  download_size: 3324988803
  dataset_size: 10171067753.481998
configs:
  - config_name: default
    data_files:
      - split: train
        path: data/train-*
      - split: validation
        path: data/validation-*
      - split: test
        path: data/test-*
task_categories:
  - question-answering
language:
  - en
pretty_name: ScreenQA
size_categories:
  - 10K<n<100K
---
# Dataset Card for ScreenQA
Question answering on RICO screens: google-research-datasets/screen_qa.
## Citation

**BibTeX:**
```bibtex
@misc{hsiao2024screenqa,
  title={ScreenQA: Large-Scale Question-Answer Pairs over Mobile App Screenshots},
  author={Yu-Chung Hsiao and Fedir Zubach and Maria Wang and Jindong Chen},
  year={2024},
  eprint={2209.08199},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}
```