yuekai commited on
Commit
7f772ae
·
verified ·
1 Parent(s): 6a206cf

Upload folder using huggingface_hub

Browse files
Files changed (34) hide show
  1. .gitattributes +0 -3
  2. aishell1.py +247 -0
  3. data/aishell_cuts_dev.00000000.jsonl.gz +3 -0
  4. data/aishell_cuts_dev.00000000.tar.gz +3 -0
  5. data/aishell_cuts_dev.00000001.jsonl.gz +3 -0
  6. data/aishell_cuts_dev.00000001.tar.gz +3 -0
  7. data/aishell_cuts_test.00000000.jsonl.gz +3 -0
  8. data/aishell_cuts_test.00000000.tar.gz +3 -0
  9. data/aishell_cuts_train.00000000.jsonl.gz +3 -0
  10. data/aishell_cuts_train.00000000.tar.gz +3 -0
  11. data/aishell_cuts_train.00000001.jsonl.gz +3 -0
  12. data/aishell_cuts_train.00000001.tar.gz +3 -0
  13. data/aishell_cuts_train.00000002.jsonl.gz +3 -0
  14. data/aishell_cuts_train.00000002.tar.gz +3 -0
  15. data/aishell_cuts_train.00000003.jsonl.gz +3 -0
  16. data/aishell_cuts_train.00000003.tar.gz +3 -0
  17. data/aishell_cuts_train.00000004.jsonl.gz +3 -0
  18. data/aishell_cuts_train.00000004.tar.gz +3 -0
  19. data/aishell_cuts_train.00000005.jsonl.gz +3 -0
  20. data/aishell_cuts_train.00000005.tar.gz +3 -0
  21. data/aishell_cuts_train.00000006.jsonl.gz +3 -0
  22. data/aishell_cuts_train.00000006.tar.gz +3 -0
  23. data/aishell_cuts_train.00000007.jsonl.gz +3 -0
  24. data/aishell_cuts_train.00000007.tar.gz +3 -0
  25. data/aishell_cuts_train.00000008.jsonl.gz +3 -0
  26. data/aishell_cuts_train.00000008.tar.gz +3 -0
  27. data/aishell_cuts_train.00000009.jsonl.gz +3 -0
  28. data/aishell_cuts_train.00000009.tar.gz +3 -0
  29. data/aishell_cuts_train.00000010.jsonl.gz +3 -0
  30. data/aishell_cuts_train.00000010.tar.gz +3 -0
  31. data/aishell_cuts_train.00000011.jsonl.gz +3 -0
  32. data/aishell_cuts_train.00000011.tar.gz +3 -0
  33. data/aishell_cuts_train.00000012.jsonl.gz +3 -0
  34. data/aishell_cuts_train.00000012.tar.gz +3 -0
.gitattributes CHANGED
@@ -53,6 +53,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
56
- # Video files - compressed
57
- *.mp4 filter=lfs diff=lfs merge=lfs -text
58
- *.webm filter=lfs diff=lfs merge=lfs -text
 
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
aishell1.py ADDED
@@ -0,0 +1,247 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ Aishell dataset.
16
+ """
17
+
18
+ import os
19
+ import datasets
20
+ from huggingface_hub import list_repo_files
21
+ import gzip
22
+ import json
23
+
24
# Hub repository that hosts the sharded audio archives and metadata shards.
repo_id = "yuekai/aishell1"

_DESCRIPTION = """\
aishell
"""
_HOMEPAGE = "https://github.com/SpeechColab/Aishell"

# The three subsets served by this loader; each maps 1:1 onto a split.
_SUBSETS = ("train", "dev", "test")

_BASE_DATA_URL = f"https://huggingface.co/datasets/{repo_id}/resolve/main/"

# Sharded tar archives with the wav files, e.g.
# data/aishell_cuts_train.00000000.tar.gz (archive_id is zero-padded to 8 digits).
_AUDIO_ARCHIVE_URL = _BASE_DATA_URL + "data/aishell_cuts_{subset}.{archive_id:08}.tar.gz"

# Matching per-shard metadata manifests (one JSON object per line, gzipped).
_META_URL = _BASE_DATA_URL + "data/aishell_cuts_{subset}.{archive_id:08}.jsonl.gz"

# NOTE(review): this performs a network call at import time.  The file listing
# is used by Aishell1._split_generators to count how many shards each split has.
FILES = list_repo_files(repo_id, repo_type="dataset")

logger = datasets.utils.logging.get_logger(__name__)
42
+
43
+
44
class CustomAudioConfig(datasets.BuilderConfig):
    """BuilderConfig selecting one subset ("train", "dev" or "test") of the dataset."""

    def __init__(self, name, *args, **kwargs):
        """Create a config for a single subset.

        Args:
            name: subset name; must be one of ``_SUBSETS``.

        Raises:
            ValueError: if ``name`` is not a known subset.
        """
        # Validate with a real exception before touching the base class:
        # ``assert`` is stripped under ``python -O`` and would silently
        # accept (and half-initialize) configs with bad subset names.
        if name not in _SUBSETS:
            raise ValueError(f"Unknown subset {name}, expected one of {_SUBSETS}")
        super().__init__(name=name, *args, **kwargs)
        # _split_generators expects a tuple of subsets to fetch.
        self.subsets_to_download = (name,)
53
+
54
+
55
class Aishell1(datasets.GeneratorBasedBuilder):
    """Loader for the AISHELL-1 Mandarin speech recognition corpus.

    The data is hosted on the Hugging Face Hub as sharded ``.tar.gz``
    archives containing the wav files, each paired with a ``.jsonl.gz``
    metadata shard (lhotse-style cut manifests, one JSON object per line).
    Three subsets are available: "train", "dev" and "test"; the "train"
    config also prepares the evaluation splits.
    """
    # NOTE(review): the previous docstring was copied verbatim from the
    # GigaSpeech loader and described a different (English) corpus.

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [CustomAudioConfig(name=subset) for subset in _SUBSETS]

    DEFAULT_WRITER_BATCH_SIZE = 128

    def _info(self):
        """Return the dataset schema (features, description, homepage)."""
        features = datasets.Features(
            {
                "segment_id": datasets.Value("string"),
                "speaker": datasets.Value("string"),
                "text": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16_000),
                # Relative path to the full audio file in the original data dirs.
                "original_full_path": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
        )

    @property
    def _splits_to_subsets(self):
        # Each split maps onto exactly one subset prefix in the repo file names.
        return {
            "train": ["train"],
            "dev": ["dev"],
            "test": ["test"],
        }

    def _split_generators(self, dl_manager):
        """Download/extract the sharded archives and build one generator per split."""
        splits_to_subsets = self._splits_to_subsets
        if self.config.name in {"dev", "test"}:
            splits = (self.config.name,)
        else:
            # The "train" config prepares all three splits.
            splits = ("train", "dev", "test")

        # Every shard id contributes exactly two repo files (an audio
        # .tar.gz and a metadata .jsonl.gz), hence the division by two.
        split_to_n_archives = {
            split: len([f for f in FILES if f"cuts_{splits_to_subsets[split][0]}" in f]) // 2
            for split in splits
        }

        # Sharded archives with the audio files.
        audio_archives_urls = {
            split: [
                _AUDIO_ARCHIVE_URL.format(subset=splits_to_subsets[split][0], archive_id=i)
                for i in range(split_to_n_archives[split])
            ]
            for split in splits
        }
        audio_archives_paths = dl_manager.download(audio_archives_urls)

        # In streaming mode the archives are iterated directly, so there is
        # nothing to extract locally.
        local_audio_archives_paths = (
            dl_manager.extract(audio_archives_paths) if not dl_manager.is_streaming else None
        )

        # Sharded jsonl.gz metadata manifests, index-aligned with the archives.
        meta_urls = {
            split: [
                _META_URL.format(subset=splits_to_subsets[split][0], archive_id=i)
                for i in range(split_to_n_archives[split])
            ]
            for split in splits
        }
        meta_paths = dl_manager.download(meta_urls)

        def _gen_kwargs(split):
            # One tar iterator per audio shard, aligned index-wise with its
            # metadata shard in `meta_paths`.
            return {
                "audio_archives_iterators": [
                    dl_manager.iter_archive(archive_path)
                    for archive_path in audio_archives_paths[split]
                ],
                "local_audio_archives_paths": (
                    local_audio_archives_paths[split] if local_audio_archives_paths else None
                ),
                "meta_paths": meta_paths[split],
            }

        split_names = {
            "train": datasets.Split.TRAIN,
            "dev": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(name=split_names[split], gen_kwargs=_gen_kwargs(split))
            for split in splits
            if split in audio_archives_paths
        ]

    def _generate_examples(self, audio_archives_iterators, local_audio_archives_paths, meta_paths):
        """Yield ``(segment_id, example)`` pairs for one split.

        Args:
            audio_archives_iterators: one tar-archive iterator per shard.
            local_audio_archives_paths: extraction dirs matching the shards,
                or ``None`` in streaming mode.
            meta_paths: jsonl.gz metadata manifest per shard, index-aligned
                with ``audio_archives_iterators``.
        """

        def load_meta(file_path):
            """Read a jsonl.gz manifest into a dict keyed by cut id."""
            data = {}
            with gzip.open(file_path, 'rt', encoding='utf-8') as f:
                for line in f:
                    item = json.loads(line)
                    data[item["id"]] = item
            return data

        assert len(audio_archives_iterators) == len(meta_paths)
        if local_audio_archives_paths:
            assert len(audio_archives_iterators) == len(local_audio_archives_paths)

        for i, (meta_path, audio_archive_iterator) in enumerate(zip(meta_paths, audio_archives_iterators)):
            meta_dict = load_meta(meta_path)

            for audio_path_in_archive, audio_file in audio_archive_iterator:
                # `audio_path_in_archive` is like
                # "data/aishell_cuts_test.00000000/BAC/BAC009S0764W0393-359.wav"
                audio_filename = os.path.split(audio_path_in_archive)[-1]

                # The cut id is the file name with the ".wav" suffix dropped.
                audio_id = audio_filename.split(".wav")[0]
                audio_meta = meta_dict[audio_id]

                # Flatten the nested lhotse cut fields into the flat schema
                # declared in _info().
                audio_meta["segment_id"] = audio_id
                audio_meta["original_full_path"] = audio_meta["recording"]["sources"][0]["source"]
                audio_meta["text"] = audio_meta['supervisions'][0]['text']
                audio_meta["speaker"] = audio_meta['supervisions'][0]['speaker']

                # In non-streaming mode point at the extracted file on disk;
                # in streaming mode only the in-archive path is available.
                path = (
                    os.path.join(local_audio_archives_paths[i], audio_path_in_archive)
                    if local_audio_archives_paths
                    else audio_path_in_archive
                )

                yield audio_id, {
                    "audio": {"path": path, "bytes": audio_file.read()},
                    # Keep only the keys that are part of the declared schema.
                    **{feature: value for feature, value in audio_meta.items() if feature in self.info.features}
                }
data/aishell_cuts_dev.00000000.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c245b7599726aadea475813f1203999d6aa3bcad810a14800ea828e8321546e
3
+ size 1043915
data/aishell_cuts_dev.00000000.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:02a910401b1c67fe48ef897704a533651a649e1801e67bae89a1a781869da3b3
3
+ size 1105007721
data/aishell_cuts_dev.00000001.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f3d3c89456246eb245f0a7a7c185b42b6f20a3e1727c4d9ded399c97f44417d
3
+ size 449575
data/aishell_cuts_dev.00000001.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34a2f5e670098738f8771a693ff427f7d18652fef248626b255feee96a2b30ef
3
+ size 474702533
data/aishell_cuts_test.00000000.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e65da907f2673be9b81609c5c7cb14fa27ec696d4ab176d3851588b2890594c3
3
+ size 747291
data/aishell_cuts_test.00000000.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d0a3be600f0405e839f617b569a30aadf6f460b672f3defe78cb9499f22ecc9
3
+ size 867282901
data/aishell_cuts_train.00000000.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f27a6e8369e559c7cec39c9db050d0a6b99ecd651ee7870019c73df37f7e1f85
3
+ size 1077556
data/aishell_cuts_train.00000000.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b65f3b1c93113dd69d760b4f96914e803346ed2f1c2dae61133a8d19517bb754
3
+ size 1089651966
data/aishell_cuts_train.00000001.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:62056bd18bc869e8a9c4ee2251cdd13201758f9b762cdcc5a94809e1159063e6
3
+ size 1078771
data/aishell_cuts_train.00000001.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:66a65a7aeebbe513e004ff5272face6548a77229914dc069c5b4fd6ee2931396
3
+ size 1091331560
data/aishell_cuts_train.00000002.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d0a8af4cac64f97dd04ba01c34728876ad2d69ad7550a977e0806fd4fd477198
3
+ size 1079388
data/aishell_cuts_train.00000002.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b9f8836c60d84450a8057638e71b0f40259d84475ccf26160363bc24c2b88645
3
+ size 1092782547
data/aishell_cuts_train.00000003.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:abcd628f696299a5659492bb7f96c0fd27245f609e02eb25d10115c7714fee08
3
+ size 1077528
data/aishell_cuts_train.00000003.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c90081e443553c59ebd409616cd558a8f0d49d145c233af19be56df0d81fc6d1
3
+ size 1087490314
data/aishell_cuts_train.00000004.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73190c8c8578a401aea9afc6c3816d222a59637cc7393f33816177b548bfc696
3
+ size 1078564
data/aishell_cuts_train.00000004.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:17accd6ec12d0399590ee23ec5e5a6f5cca158750b4ff422b7f8528e94cdfb85
3
+ size 1096024028
data/aishell_cuts_train.00000005.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ee95a26dd0fdba7192fd21a771d94e9fc9ad06e1af7b3f06fe4f0cc8530c984
3
+ size 1079663
data/aishell_cuts_train.00000005.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad4261e5543cd9ee64fd4687fbceaac5bc8b6c13a217b972fdcccca8464527a9
3
+ size 1096395365
data/aishell_cuts_train.00000006.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9a270a4ae8784dc41a95a600db84fda1bfb5f3cd5ea4e3d0202d06437146ced3
3
+ size 1080005
data/aishell_cuts_train.00000006.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d2b4fe6431647d394117355d50671e21bb9db641d6aa5b85af0596ce39ea87ac
3
+ size 1092728540
data/aishell_cuts_train.00000007.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:98f6cd97e3b9f154421856f4d6cc6a14c1c38b2866612d64876e8776b0e56872
3
+ size 1077869
data/aishell_cuts_train.00000007.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b01b9f4eeb7e145cc4efd5e0f3d6dc49e0aa7534f5963f9bd4aebafa70c528b6
3
+ size 1089323086
data/aishell_cuts_train.00000008.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b09c90febec86cc4e13c8d3e9172b7deaa51d666a9f09222ad076ac840e915a6
3
+ size 1077727
data/aishell_cuts_train.00000008.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a0ce844d77b3b2d2407459efd1006b70f16a1aebd084147c662137d96dfacc4
3
+ size 1091747456
data/aishell_cuts_train.00000009.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1bbfc5b1e9b0e0bf98d665548baf1d42836df850d10d78e277d98ae96571a01d
3
+ size 1079868
data/aishell_cuts_train.00000009.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f455a16e4bbc06dc32720afbea5e2b79262094668518ec29c951daa1c8ec041
3
+ size 1089955750
data/aishell_cuts_train.00000010.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8389846b1325de3437f23e417dc5ec9c3316e4e2024f78c2ce01c3ffe7d95f19
3
+ size 1081759
data/aishell_cuts_train.00000010.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6ee64130381313af1ff20550d5873fde7c3d28c93357ee67f5474da7747e03d2
3
+ size 1097796415
data/aishell_cuts_train.00000011.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a0a21fbc2555893b4d2f713f47685f988a10cdbe972d7fa9ea61b37277a6a73
3
+ size 1077820
data/aishell_cuts_train.00000011.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f89d08a4d6fd5ce4c82ae2b2b806be0e9bc209810f4c611021de1316f1375a2
3
+ size 1084767411
data/aishell_cuts_train.00000012.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1cbb783d30d39f5450c66f8548a633733ba24c3d1940e9b074d4fad5bd6533b6
3
+ size 10448
data/aishell_cuts_train.00000012.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:379cf4f5f1c9ef51421f415e670a63f97de68ec6738d5cf5f2427986f58f50db
3
+ size 11357888