Dataset: lmqg
Modalities: Text
Languages: Japanese
Libraries: Datasets

""" Script to process raw SQuAD file for Question Generation format
gsplit -l 1000 -d --additional-suffix=.jsonl train.jsonl train
gsplit -l 1000 -d --additional-suffix=.jsonl test.jsonl test
gsplit -l 1000 -d --additional-suffix=.jsonl validation.jsonl validation
"""
import json
import os
import re
from tqdm import tqdm
from typing import Dict
from datasets import load_dataset
from ja_sentence_split import JASplitter

HIGHLIGHT_TOKEN = '<hl>'
SPLITTER = JASplitter()
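
# NOTE: ja_sentence_split is a local helper module that ships alongside this
# script in the lmqg repository; JASplitter is assumed to be a rule-based
# Japanese sentence splitter whose call yields one segment per sentence.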


def get_sentence(document: str):
    """ Split a document into a list of sentence strings. """
    return [str(s) for s in SPLITTER(document)]


def process_single_data(data: Dict):
    """ Convert a single raw JaQuAD record into QG format """
    example = {'question': data["question"], 'paragraph': data["context"]}

    # verify the annotated answer span against the paragraph
    answer_text = data['answers']['text'][0]
    answer_start = data['answers']['answer_start'][0]
    answer_end = answer_start + len(answer_text)
    assert example['paragraph'][answer_start: answer_end] == answer_text
    example['answer'] = answer_text

    # get the sentence containing the answer; use the annotated offset rather
    # than str.find, which could match an earlier duplicate of the answer string
    position = answer_start
    before_tmp = get_sentence(example['paragraph'][:position])
    if len(before_tmp) == 0:
        before = ''
        before_sentence = ''
    else:
        if before_tmp[-1].endswith('。'):
            before = ' '.join(before_tmp)
            before_sentence = ''
        else:
            before = ' '.join(before_tmp[:-1])
            before_sentence = before_tmp[-1]
    after_tmp = get_sentence(example['paragraph'][position + len(example['answer']):])
    if len(after_tmp) == 0:
        after = ''
        after_sentence = ''
    else:
        after = ' '.join(after_tmp[1:])
        after_sentence = after_tmp[0]
    example['sentence'] = '{}{}{}'.format(before_sentence, example['answer'], after_sentence)

    # get paragraph_sentence: paragraph with the answer-bearing sentence wrapped in <hl>
    source_text = '{0}{1}{2}{1}{3}'.format(before, HIGHLIGHT_TOKEN, example['sentence'], after)
    example['paragraph_sentence'] = re.sub(r'\s+', ' ', source_text)

    # get paragraph_answer: paragraph with the answer span wrapped in <hl>
    source_text = '{0}{1}{2}{1}{3}'.format(
        example['paragraph'][:position], HIGHLIGHT_TOKEN, example['answer'],
        example['paragraph'][position + len(example['answer']):])
    example['paragraph_answer'] = re.sub(r'\s+', ' ', source_text)

    # get sentence_answer: the answer-bearing sentence with the answer wrapped in <hl>
    if len(before_tmp) == 0 or before_tmp[-1].endswith('。'):
        before = ''
    else:
        before = before_tmp[-1]
    if len(after_tmp) == 0:
        after = ''
    else:
        after = after_tmp[0]
    source_text = '{0}{1}{2}{1}{3}'.format(before, HIGHLIGHT_TOKEN, example['answer'], after)
    example['sentence_answer'] = re.sub(r'\s+', ' ', source_text)
    # drop newlines that follow a Japanese full stop in every field
    for _k in example.keys():
        example[_k] = example[_k].replace('。\n\n', '。').replace('。\n', '。')
    return example
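
# Summary of the fields produced above:
#   question, paragraph, answer  - copied from JaQuAD (answer span verified)
#   sentence                     - the sentence containing the answer
#   paragraph_sentence           - paragraph with that sentence wrapped in <hl>
#   paragraph_answer             - paragraph with the answer span wrapped in <hl>
#   sentence_answer              - that sentence with the answer wrapped in <hl>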


if __name__ == '__main__':
    # JaQuAD ships only train/validation splits, so carve a test set of the
    # same size as validation out of the shuffled training data
    jaquad_data = load_dataset("SkelterLabsInc/JaQuAD")
    data_dev = jaquad_data['validation']
    data_train = jaquad_data['train']
    data_train = data_train.shuffle(seed=1)
    data_test = [data_train[i] for i in range(len(data_dev))]
    data_train = [data_train[i] for i in range(len(data_dev), len(data_train))]

    data_all = {'train': data_train, 'validation': data_dev, 'test': data_test}

    output = './data/processed'
    os.makedirs(output, exist_ok=True)
    for k, _data in data_all.items():
        with open('{}/{}.jsonl'.format(output, k), 'w', encoding='utf-8') as f:
            for single_data in tqdm(_data):
                single_data = process_single_data(single_data)
                f.write(json.dumps(single_data) + '\n')
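
# Usage sketch (an assumption, not part of the original pipeline): the jsonl
# files written above can be loaded back with the `datasets` JSON loader, e.g.
#
#   from datasets import load_dataset
#   qg = load_dataset('json', data_files={'train': './data/processed/train.jsonl'})
#   print(qg['train'][0]['paragraph_answer'])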