Datasets: lmqg /
Languages: Italian
asahi417 committed
Commit
d621c15
1 Parent(s): c9e25d8
Files changed (48)
  1. README.md +0 -0
  2. data/processed/test00.jsonl +0 -0
  3. data/processed/test01.jsonl +0 -0
  4. data/processed/test02.jsonl +0 -0
  5. data/processed/test03.jsonl +0 -0
  6. data/processed/test04.jsonl +0 -0
  7. data/processed/test05.jsonl +0 -0
  8. data/processed/train00.jsonl +0 -0
  9. data/processed/train01.jsonl +0 -0
  10. data/processed/train02.jsonl +0 -0
  11. data/processed/train03.jsonl +0 -0
  12. data/processed/train04.jsonl +0 -0
  13. data/processed/train05.jsonl +0 -0
  14. data/processed/train06.jsonl +0 -0
  15. data/processed/train07.jsonl +0 -0
  16. data/processed/train08.jsonl +0 -0
  17. data/processed/train09.jsonl +0 -0
  18. data/processed/train10.jsonl +0 -0
  19. data/processed/train11.jsonl +0 -0
  20. data/processed/train12.jsonl +0 -0
  21. data/processed/train13.jsonl +0 -0
  22. data/processed/train14.jsonl +0 -0
  23. data/processed/train15.jsonl +0 -0
  24. data/processed/train16.jsonl +0 -0
  25. data/processed/train17.jsonl +0 -0
  26. data/processed/train18.jsonl +0 -0
  27. data/processed/train19.jsonl +0 -0
  28. data/processed/train20.jsonl +0 -0
  29. data/processed/train21.jsonl +0 -0
  30. data/processed/train22.jsonl +0 -0
  31. data/processed/train23.jsonl +0 -0
  32. data/processed/train24.jsonl +0 -0
  33. data/processed/train25.jsonl +0 -0
  34. data/processed/train26.jsonl +0 -0
  35. data/processed/train27.jsonl +0 -0
  36. data/processed/train28.jsonl +0 -0
  37. data/processed/train29.jsonl +0 -0
  38. data/processed/train30.jsonl +0 -0
  39. data/processed/train31.jsonl +0 -0
  40. data/processed/validation00.jsonl +0 -0
  41. data/processed/validation01.jsonl +0 -0
  42. data/processed/validation02.jsonl +0 -0
  43. data/processed/validation03.jsonl +0 -0
  44. data/processed/validation04.jsonl +0 -0
  45. data/processed/validation05.jsonl +0 -0
  46. generate_reference_files.py +19 -0
  47. process.py +125 -0
  48. qg_itquad.py +65 -0
README.md ADDED
Empty file.
data/processed/*.jsonl ADDED (44 shards: test00–test05, train00–train31, validation00–validation05)
The diff for each of these files is too large to render. See the raw files.
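Each shard is plain JSON Lines, one example per line. As a sanity check, a minimal sketch for peeking at one shard locally (the relative path is assumed; field names follow the qg_itquad.py builder below):

import json

# Read the first record of one shard and list its fields.
with open('data/processed/test00.jsonl', encoding='utf-8') as f:
    first = json.loads(f.readline())
print(sorted(first.keys()))
# Per the builder below: answer, paragraph, paragraph_answer, paragraph_id,
# paragraph_sentence, question, sentence, sentence_answer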
generate_reference_files.py ADDED
@@ -0,0 +1,19 @@
+import os
+from glob import glob
+from datasets import load_dataset
+
+os.makedirs('./reference_files', exist_ok=True)
+
+
+for split in ['validation', 'test']:
+    dataset = load_dataset('asahi417/qg_itquad', split=split)
+    for data in ['question', 'answer', 'sentence', 'paragraph']:
+        with open('./reference_files/{}-{}.txt'.format(data, split), 'w') as f:
+            if data == 'paragraph':
+                tmp_data = dataset['paragraph_id']
+            else:
+                tmp_data = dataset[data]
+            f.write('\n'.join([i.replace('\n', '.') for i in tmp_data]))
+
+    length = [len(open(i).read().split('\n')) for i in glob(f'reference_files/*{split}.txt')]
+    assert len(list(set(length))) == 1, length
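The script above writes four line-aligned reference files per split (question, answer, sentence, and paragraph, the last holding paragraph ids). A minimal usage sketch, assuming the script has already been run:

# Read two of the parallel reference files back; line i of each file
# describes the same example.
questions = open('./reference_files/question-test.txt', encoding='utf-8').read().split('\n')
answers = open('./reference_files/answer-test.txt', encoding='utf-8').read().split('\n')
assert len(questions) == len(answers)
print(questions[0], '->', answers[0])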
process.py ADDED
@@ -0,0 +1,125 @@
+"""
+gsplit -l 1500 -d --additional-suffix=.jsonl test.jsonl test
+gsplit -l 1500 -d --additional-suffix=.jsonl train.jsonl train
+gsplit -l 1500 -d --additional-suffix=.jsonl validation.jsonl validation
+rm -rf test.jsonl
+rm -rf train.jsonl
+rm -rf validation.jsonl
+"""
+import json
+import os
+import re
+import spacy
+from random import seed, shuffle
+from tqdm import tqdm
+from datasets import load_dataset
+
+DATASET_NAME = "squad_it"
+DATASET_TYPES = None
+HIGHLIGHT_TOKEN = '<hl>'
+GENERATE_TEST_SPLIT = True
+SPLITTER = spacy.load('it_core_news_sm')
+
+
+def get_sentence(document: str): return [str(sent) for sent in SPLITTER(document).sents]
+
+
+def process_single_data(question: str, paragraph: str, answer: str):
+    """ Convert single raw json data into QG format """
+    example = {'question': question, 'paragraph': paragraph, 'answer': answer}
+    start = example['paragraph'].find(example['answer'])
+    end = start + len(answer)
+    assert paragraph[start:end] == answer
+    # get sentence
+    before_tmp = get_sentence(example['paragraph'][:start])
+    if len(before_tmp) == 0:
+        before = ''
+        before_sentence = ''
+    else:
+        if before_tmp[-1].endswith('.'):
+            before = ' '.join(before_tmp)
+            before_sentence = ''
+        else:
+            before = ' '.join(before_tmp[:-1])
+            before_sentence = before_tmp[-1]
+            before_sentence = before_sentence if before_sentence.endswith(' ') else f'{before_sentence} '
+    after_tmp = get_sentence(example['paragraph'][start + len(example['answer']):])
+    if len(after_tmp) == 0:
+        after = ''
+        after_sentence = ''
+    else:
+        after = ' '.join(after_tmp[1:])
+        after_sentence = after_tmp[0]
+        after_sentence = after_sentence if after_sentence.startswith(' ') else f' {after_sentence}'
+    example['sentence'] = f"{before_sentence}{example['answer']}{after_sentence}"
+
+    # get paragraph_sentence
+    before = '' if before == '' else f'{before} '
+    after = '' if after == '' else f' {after}'
+    source_text = '{0}{1} {2} {1}{3}'.format(before, HIGHLIGHT_TOKEN, example['sentence'], after)
+    example['paragraph_sentence'] = re.sub(r'\s+', ' ', source_text)
+
+    # get paragraph_answer
+    source_text = '{0}{1} {2} {1}{3}'.format(
+        example['paragraph'][:start], HIGHLIGHT_TOKEN, example['answer'],
+        example['paragraph'][start + len(example['answer']):])
+    example['paragraph_answer'] = re.sub(r'\s+', ' ', source_text)
+
+    # get sentence_answer
+    if len(before_tmp) == 0 or before_tmp[-1].endswith('.'):
+        before = ''
+    else:
+        before = before_tmp[-1] if before_tmp[-1].endswith(' ') else f'{before_tmp[-1]} '
+    if len(after_tmp) == 0:
+        after = ''
+    else:
+        after = after_tmp[0] if after_tmp[0].startswith(' ') else f' {after_tmp[0]}'
+    source_text = '{0}{1} {2} {1}{3}'.format(before, HIGHLIGHT_TOKEN, example['answer'], after)
+    example['sentence_answer'] = re.sub(r'\s+', ' ', source_text)
+
+    return example
+
+
+if __name__ == '__main__':
+    output = './data/processed'
+    os.makedirs(output, exist_ok=True)
+    if DATASET_TYPES is not None:
+        dataset = load_dataset(DATASET_NAME, DATASET_TYPES)
+    else:
+        dataset = load_dataset(DATASET_NAME)
+    for _split in dataset.keys():
+        tmp_dataset = dataset[_split]
+        with open(f'{output}/{_split}.jsonl', 'w') as f:
+            for single_data in tqdm(tmp_dataset):
+                question_str = single_data['question']
+                paragraph_str = single_data['context']
+                answer_str = single_data['answers']['text']
+                if type(answer_str) == list:
+                    answer_str = answer_str[0]
+                assert type(answer_str) is str, answer_str
+                assert type(question_str) is str, question_str
+                assert type(paragraph_str) is str, paragraph_str
+                tmp_data = process_single_data(question=question_str, paragraph=paragraph_str, answer=answer_str)
+                tmp_data['paragraph_id'] = single_data['id']
+                f.write(json.dumps(tmp_data) + '\n')
+    if GENERATE_TEST_SPLIT:
+        # randomly sample for validation set
+        with open(f'{output}/train.jsonl') as f:
+            lines_train = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
+        with open(f'{output}/test.jsonl') as f:
+            size = len([i for i in f.read().split('\n') if len(i) > 0])
+        paragraph_ids = list(set([i['paragraph_id'] for i in lines_train]))
+        data_train = {p: [i for i in lines_train if i['paragraph_id'] == p] for p in paragraph_ids}
+        seed(0)
+        shuffle(paragraph_ids)
+        data_test = []
+        data_train_new = []
+        for i in paragraph_ids:
+            if len(data_test) < size:
+                data_test += data_train[i]
+            else:
+                data_train_new += data_train[i]
+        with open(f'{output}/train.jsonl', 'w') as f:
+            f.write('\n'.join([json.dumps(i) for i in data_train_new]))
+        with open(f'{output}/validation.jsonl', 'w') as f:
+            f.write('\n'.join([json.dumps(i) for i in data_test]))
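To make the QG format concrete, a minimal sketch of calling process_single_data on a toy Italian input (assumes spacy plus the it_core_news_sm model are installed and process.py is importable; the sentence-level fields depend on spaCy's sentence boundaries, so those outputs are only indicative):

from process import process_single_data

example = process_single_data(
    question='Dove si trova Roma?',
    paragraph='Roma si trova in Italia. Milano si trova nel nord del paese.',
    answer='Italia')
# The answer span is wrapped in <hl> tokens inside the full paragraph:
print(example['paragraph_answer'])
# 'Roma si trova in <hl> Italia <hl>. Milano si trova nel nord del paese.'
# sentence_answer highlights the answer within its sentence only,
# indicatively something like 'Roma si trova in <hl> Italia <hl>.'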
qg_itquad.py ADDED
@@ -0,0 +1,65 @@
+""" python -c "from datasets import load_dataset;load_dataset('.')" """
+import json
+from itertools import chain
+import datasets
+
+logger = datasets.logging.get_logger(__name__)
+_DESCRIPTION = """[SQuAD-it](https://huggingface.co/datasets/squad_it) dataset for question generation (QG) task."""
+_URL = 'https://huggingface.co/datasets/asahi417/qg_itquad/raw/main/data/processed'
+_URLS = {
+    str(datasets.Split.TEST): [f'{_URL}/test{i:02d}.jsonl' for i in range(6)],
+    str(datasets.Split.TRAIN): [f'{_URL}/train{i:02d}.jsonl' for i in range(32)],
+    str(datasets.Split.VALIDATION): [f'{_URL}/validation{i:02d}.jsonl' for i in range(6)],
+}
+
+
+class QGITQuADConfig(datasets.BuilderConfig):
+    """BuilderConfig for SquadQG"""
+
+    def __init__(self, **kwargs):
+        """BuilderConfig for SquadQG.
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(QGITQuADConfig, self).__init__(**kwargs)
+
+
+class QGITQuAD(datasets.GeneratorBasedBuilder):
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "answer": datasets.Value("string"),
+                    "question": datasets.Value("string"),
+                    "sentence": datasets.Value("string"),
+                    "paragraph": datasets.Value("string"),
+                    "sentence_answer": datasets.Value("string"),
+                    "paragraph_answer": datasets.Value("string"),
+                    "paragraph_sentence": datasets.Value("string"),
+                    "paragraph_id": datasets.Value("string")
+                }
+            ),
+            supervised_keys=None,
+            homepage="https://github.com/asahi417/lm-question-generation"
+        )
+
+    def _split_generators(self, dl_manager):
+        downloaded_file = dl_manager.download_and_extract(_URLS)
+        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
+                for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]

+    def _generate_examples(self, filepaths):
+        _key = 0
+        for filepath in filepaths:
+            logger.info("generating examples from = %s", filepath)
+            with open(filepath, encoding="utf-8") as f:
+                _list = f.read().split('\n')
+                if _list[-1] == '':
+                    _list = _list[:-1]
+                for i in _list:
+                    data = json.loads(i)
+                    yield _key, data
+                    _key += 1
+
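Once the shards and the builder are in place, the dataset loads through datasets as usual; a minimal sketch (repo id taken from _URL above, or load_dataset('.') from a local clone, as the module docstring notes):

from datasets import load_dataset

dataset = load_dataset('asahi417/qg_itquad', split='validation')
# paragraph_sentence wraps the answer-bearing sentence in <hl> tokens
print(dataset[0]['paragraph_sentence'])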