Datasets:
Tasks:
Text Generation
Modalities:
Text
Sub-tasks:
language-modeling
Languages:
Korean
Size:
10K - 100K
ArXiv:
Tags:
question-generation
License:
"""Export the KoQuAD question-generation splits to line-per-example text files.

For each of the validation and test splits, downloads ``asahi417/qg_koquad``
and writes one reference file per column (question, answer, sentence,
paragraph) under ``./reference_files``, then sanity-checks that every file
has exactly one line per example.
"""
import os
from glob import glob

from datasets import load_dataset

os.makedirs('./reference_files', exist_ok=True)
for split in ['validation', 'test']:
    dataset = load_dataset('asahi417/qg_koquad', split=split, download_mode='force_redownload')
    for data in ['question', 'answer', 'sentence', 'paragraph']:
        # utf-8 explicitly: the text is Korean and the platform default
        # encoding is not guaranteed to handle it.
        with open('./reference_files/{}-{}.txt'.format(data, split), 'w', encoding='utf-8') as f:
            if data == 'paragraph':
                # Raw paragraph text is not exported; the paragraph_id column
                # stands in for it.
                tmp_data = dataset['paragraph_id']
            else:
                tmp_data = dataset[data]
            # One example per line: embedded newlines would corrupt the
            # line-count invariant checked below, so replace them.
            f.write('\n'.join([i.replace('\n', '.') for i in tmp_data]))
    # Sanity check: all files for this split must have the same number of
    # lines, and that number must equal the number of examples in the split.
    length = []
    for path in glob(f'reference_files/*{split}.txt'):
        with open(path, encoding='utf-8') as f:
            length.append(len(f.read().split('\n')))
    assert len(list(set(length))) == 1, length
    assert length[0] == len(dataset), f"{length[0]} != {len(dataset)}"