Padomin committed
Commit bcbf8ed
Parent: 0bd28e7

Create new file

Files changed (1): coraal-asr.py (+146, -0)
coraal-asr.py ADDED
@@ -0,0 +1,146 @@
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """CORAAL ASR dataset."""
+
+ import json
+ import os
+ from copy import deepcopy
+ from more_itertools import windowed
+ import datasets
+
+ _CITATION = """\
+ """
+
+ _DESCRIPTION = """\
+ A dataset for correcting errors in automatic speech recognition (ASR) output on CORAAL.
+ """
+ _HOMEPAGE = ""
+ _LICENSE = ""
+
+ URLS = {
+     "v1": {
+         "text": "https://huggingface.co/datasets/Padomin/coraal-asr/resolve/main/coraal-asr.tar.gz",
+     }
+ }
+
+
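+ # BuilderConfig exposing the context window used to build each example:
+ # n_fronts / n_rears set how many preceding / following ASR utterances are
+ # included around the n_bodies target utterances, and the *_prefix strings
+ # label the corresponding sections of the generated `src` text.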
+ class coraal_asr_config(datasets.BuilderConfig):
+     def __init__(self, n_fronts=0, n_bodies=1, n_rears=0, front_prefix='front:\n', body_prefix='body:\n', rear_prefix='rear:\n', **kwargs):
+         super(coraal_asr_config, self).__init__(**kwargs)
+         self.n_fronts = n_fronts
+         self.n_bodies = n_bodies
+         self.n_rears = n_rears
+         self.front_prefix = front_prefix
+         self.body_prefix = body_prefix
+         self.rear_prefix = rear_prefix
+
+ class coraal_asr(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("0.2.0")
+     BUILDER_CONFIGS = [
+         coraal_asr_config(name="v1", version=VERSION),
+     ]
+     DEFAULT_CONFIG_NAME = "v1"  # It's not mandatory to have a default configuration; use one only if it makes sense.
+     BUILDER_CONFIG_CLASS = coraal_asr_config
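+     # With BUILDER_CONFIG_CLASS set, `datasets.load_dataset` forwards extra
+     # keyword arguments (n_fronts, n_bodies, n_rears, ...) to coraal_asr_config.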
+
+     def _info(self):
+         feature_dict = {
+             "text": datasets.Value("string"),
+             "text_asr": datasets.Value("string"),
+             "src": datasets.Value("string"),
+             "tgt": datasets.Value("string"),
+             "id": datasets.Value("string")
+         }
+
+         features = datasets.Features(feature_dict)
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         if "v1" in self.config.name:
+             urls = deepcopy(URLS["v1"])
+
+         dl_path = dl_manager.download_and_extract(urls)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(dl_path["text"], "train.jsonl"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(dl_path["text"], "test.jsonl"),
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": os.path.join(dl_path["text"], "validation.jsonl"),
+                     "split": "validation",
+                 },
+             ),
+         ]
+
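+     # Each line of the JSONL files is one JSON document; the fields accessed
+     # below imply a shape like:
+     #   {"id": "...", "utterances": [{"text": "...", "asr": "..."}, ...]}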
+     def _generate_examples(self, filepath, split):
+         """Yields examples."""
+         id_ = 0
+         with open(filepath, encoding="utf-8") as f:
+             for line in f:
+                 doc = json.loads(line)
+                 utterances = doc['utterances']
+                 # separate the reference texts from the ASR hypotheses
+                 texts_asr = [utt['asr'] for utt in utterances]
+                 texts = [utt['text'] for utt in utterances]
+                 # window the utterances, taking front and rear contexts into account
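+                 # The ASR side is padded with empty strings so that every body
+                 # utterance has n_fronts preceding and n_rears following
+                 # neighbours; a window of size n_fronts + n_bodies + n_rears
+                 # then slides over it, while the reference side is windowed
+                 # with size n_bodies only.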
+                 windowed_texts_asr = windowed([''] * self.config.n_fronts + texts_asr + [''] * self.config.n_rears, self.config.n_bodies + self.config.n_fronts + self.config.n_rears)
+                 windowed_texts = windowed(texts, self.config.n_bodies)
+
+                 for text_asr, text, utt in zip(windowed_texts_asr, windowed_texts, utterances):
+                     src = ''
+                     if self.config.n_fronts > 0:
+                         src += self.config.front_prefix
+                         src += '\n'.join(text_asr[:self.config.n_fronts])
+                         src += '\n'
+                     src += self.config.body_prefix
+                     src += '\n'.join(text_asr[self.config.n_fronts:self.config.n_fronts + self.config.n_bodies])
+                     if self.config.n_rears > 0:
+                         src += '\n' + self.config.rear_prefix
+                         src += '\n'.join(text_asr[self.config.n_fronts + self.config.n_bodies:])
+                     tgt = '\n'.join(text)
+
+                     data = {
+                         "text": utt["text"],
+                         "text_asr": utt["asr"],
+                         "src": src,
+                         "tgt": tgt,
+                         "id": doc["id"],
+                     }
+
+                     yield id_, data
+
+                     id_ += 1
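
For reference, here is a minimal sketch of what the windowing above produces. It mirrors the logic in _generate_examples with the default prefixes from coraal_asr_config, but the toy utterances are invented for illustration and it is not part of the committed file:

    from more_itertools import windowed

    # Invented ASR/reference pairs standing in for one document's utterances.
    texts_asr = ["ay saw it", "he want to", "go their"]
    texts = ["I saw it", "he wants to", "go there"]

    n_fronts, n_bodies, n_rears = 1, 1, 1

    # Pad the ASR side so every body utterance has front/rear context, then
    # slide a window of size n_fronts + n_bodies + n_rears over it.
    win_asr = windowed([""] * n_fronts + texts_asr + [""] * n_rears,
                       n_fronts + n_bodies + n_rears)
    win_txt = windowed(texts, n_bodies)

    for asr, txt in zip(win_asr, win_txt):
        src = "front:\n" + "\n".join(asr[:n_fronts]) + "\n"
        src += "body:\n" + "\n".join(asr[n_fronts:n_fronts + n_bodies])
        src += "\nrear:\n" + "\n".join(asr[n_fronts + n_bodies:])
        tgt = "\n".join(txt)
        print(repr(src), "->", repr(tgt))

Loading the dataset with a custom context window should then look like the following (assuming the script stays hosted on the Hub as Padomin/coraal-asr; extra keyword arguments are forwarded to coraal_asr_config, and recent versions of datasets additionally require trust_remote_code=True for script-based datasets):

    from datasets import load_dataset

    ds = load_dataset("Padomin/coraal-asr", "v1", n_fronts=1, n_bodies=1, n_rears=1)
    print(ds["train"][0]["src"])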