Datasets:

ArXiv:
License:
polinaeterna HF staff committed on
Commit
a44f1ba
·
1 Parent(s): c7a2c9d

update script

Browse files
Files changed (1) hide show
  1. evi.py +42 -69
evi.py CHANGED
@@ -16,6 +16,7 @@
16
  import csv
17
  import json
18
  import os
 
19
 
20
  import datasets
21
 
@@ -39,12 +40,22 @@ _ALL_CONFIGS = sorted([
39
  "en-GB", "fr-FR", "pl-PL"
40
  ])
41
 
 
 
42
  _DESCRIPTION = "EVI is a dataset for enrolment, identification, and verification" # noqa
43
 
44
  _HOMEPAGE_URL = "https://arxiv.org/abs/2204.13496"
45
 
 
 
 
 
 
 
46
  _AUDIO_DATA_URL = "https://poly-public-data.s3.eu-west-2.amazonaws.com/evi-paper/audios.zip" # noqa
47
 
 
 
48
  _VERSION = datasets.Version("0.0.10", "")
49
 
50
 
@@ -52,17 +63,10 @@ class EviConfig(datasets.BuilderConfig):
52
  """BuilderConfig for EVI"""
53
 
54
  def __init__(
55
- self, name, version, description, homepage, audio_data_url
56
  ):
57
- super().__init__(
58
- name=self.name,
59
- version=version,
60
- description=self.description,
61
- )
62
- self.name = name
63
- self.description = description
64
- self.homepage = homepage
65
- self.audio_data_url = audio_data_url
66
 
67
 
68
  def _build_config(name):
@@ -70,104 +74,84 @@ def _build_config(name):
70
  name=name,
71
  version=_VERSION,
72
  description=_DESCRIPTION,
73
- homepage=_HOMEPAGE_URL,
74
- audio_data_url=_AUDIO_DATA_URL,
75
  )
76
 
77
 
78
  class Evi(datasets.GeneratorBasedBuilder):
79
 
80
- DEFAULT_WRITER_BATCH_SIZE = 1000
81
  BUILDER_CONFIGS = [_build_config(name) for name in _ALL_CONFIGS + ["all"]]
82
 
83
  def _info(self):
84
- task_templates = None
85
- langs = _ALL_CONFIGS
86
  features = datasets.Features(
87
  {
88
- "lang_id": datasets.ClassLabel(names=langs),
89
  "dialogue_id": datasets.Value("string"),
90
  "speaker_id": datasets.Value("string"),
91
  "turn_id": datasets.Value("int32"),
92
- #
93
  "target_profile_id": datasets.Value("string"),
94
- #
95
  "asr_transcription": datasets.Value("string"),
96
  "asr_nbest": datasets.Sequence(datasets.Value("string")),
97
- #
98
  "path": datasets.Value("string"),
99
- #"audio": datasets.Audio(sampling_rate=8_000),
100
  }
101
  )
102
 
103
  return datasets.DatasetInfo(
104
  version=self.config.version,
105
  description=self.config.description,
106
- homepage=self.config.homepage,
107
  license="CC-BY-4.0",
108
  citation=_CITATION,
109
  features=features,
110
- supervised_keys=None,
111
- task_templates=task_templates,
112
  )
113
 
114
  def _split_generators(self, dl_manager):
115
- langs = (
116
- _ALL_CONFIGS
117
- if self.config.name == "all"
118
- else [self.config.name]
119
- )
120
 
121
- # audio_path = dl_manager.download_and_extract(
122
- # self.config.audio_data_url
123
- # )
124
- audio_path = ""
125
- text_path = "data"
126
- lang2text_path = {
127
- _lang: os.path.join(
128
- text_path,
129
- f"dialogues.{_lang.split('-')[0]}.tsv"
130
- )
131
- for _lang in langs
132
- }
133
- lang2audio_path = {
134
- _lang: os.path.join(
135
- audio_path,
136
- f"{_lang.split('-')[0]}"
137
- )
138
- for _lang in langs
139
  }
 
 
 
140
  return [
141
  datasets.SplitGenerator(
142
  name=datasets.Split.TEST,
143
  gen_kwargs={
144
- "audio_paths": lang2audio_path,
145
- "text_paths": lang2text_path,
 
 
146
  },
147
  )
148
  ]
149
 
150
- def _generate_examples(self, audio_paths, text_paths):
151
- key = 0
152
  for lang in text_paths.keys():
153
  text_path = text_paths[lang]
154
- audio_path = audio_paths[lang]
155
  with open(text_path, encoding="utf-8") as fin:
156
  reader = csv.DictReader(
157
  fin, delimiter="\t", skipinitialspace=True
158
  )
159
- for dictrow in reader:
 
 
160
  dialogue_id = dictrow["dialogue_id"]
161
  turn_id = dictrow["turn_num"]
162
  file_path = os.path.join(
163
- audio_path,
 
164
  dialogue_id,
165
  f'{turn_id}.wav'
166
  )
167
- # if not os.path.isfile(file_path):
168
- # file_path = None
169
- example = {
170
- "lang_id": _ALL_CONFIGS.index(lang),
171
  "dialogue_id": dialogue_id,
172
  "speaker_id": dictrow["speaker_id"],
173
  "turn_id": turn_id,
@@ -175,16 +159,5 @@ class Evi(datasets.GeneratorBasedBuilder):
175
  "asr_transcription": dictrow["transcription"],
176
  "asr_nbest": json.loads(dictrow["nbest"]),
177
  "path": file_path,
178
- # "audio": file_path,
179
  }
180
- yield key, example
181
- key += 1
182
-
183
- #
184
- # if __name__ == '__main__':
185
- # d = Evi(name='fr-FR')
186
- # d.download_and_prepare()
187
- # d = d.as_dataset(datasets.Split.TEST)
188
- # print(d)
189
- # for e in d:
190
- # print(e)
 
16
  import csv
17
  import json
18
  import os
19
+ from pathlib import Path
20
 
21
  import datasets
22
 
 
40
  "en-GB", "fr-FR", "pl-PL"
41
  ])
42
 
43
# Short (ISO-639-1) language codes covered by the dataset.
_LANGS = sorted(["en", "fr", "pl"])

_DESCRIPTION = "EVI is a dataset for enrolment, identification, and verification"  # noqa

_HOMEPAGE_URL = "https://arxiv.org/abs/2204.13496"

_BASE_URL = "https://huggingface.co/datasets/PolyAI/evi/resolve/main/data"

# Per-language transcript TSVs hosted alongside this script on the Hub.
# NOTE: built with an f-string, not os.path.join — these are URLs, and
# os.path.join would insert "\" on Windows.
_TEXT_URL = {
    lang: f"{_BASE_URL}/dialogues.{lang.split('-')[0]}.tsv" for lang in _LANGS
}

_AUDIO_DATA_URL = "https://poly-public-data.s3.eu-west-2.amazonaws.com/evi-paper/audios.zip"  # noqa

# fsspec-style chained URL for streaming directly out of the zip archive.
_ARCHIVE_PATH = "zip://::https://poly-public-data.s3.eu-west-2.amazonaws.com/evi-paper/audios.zip"
58
+
59
# Version tag for this loading script / dataset build.
_VERSION = datasets.Version("0.0.10", "")
60
 
61
 
 
63
  """BuilderConfig for EVI"""
64
 
65
  def __init__(
66
+ self, name, *args, **kwargs
67
  ):
68
+ super().__init__(name=name, *args, **kwargs)
69
+ self.languages = _LANGS if name == "all" else [name.split("-")[0]] # all langs if config == "all"
 
 
 
 
 
 
 
70
 
71
 
72
  def _build_config(name):
 
74
  name=name,
75
  version=_VERSION,
76
  description=_DESCRIPTION,
77
+ # homepage=_HOMEPAGE_URL,
 
78
  )
79
 
80
 
81
class Evi(datasets.GeneratorBasedBuilder):
    """Dataset builder for EVI (enrolment, verification, identification)."""

    DEFAULT_WRITER_BATCH_SIZE = 256
    BUILDER_CONFIGS = [_build_config(name) for name in _ALL_CONFIGS + ["all"]]

    def _info(self):
        """Declare the feature schema and dataset metadata."""
        features = datasets.Features(
            {
                "language": datasets.ClassLabel(names=_LANGS),
                "dialogue_id": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
                "turn_id": datasets.Value("int32"),
                "target_profile_id": datasets.Value("string"),
                "asr_transcription": datasets.Value("string"),
                "asr_nbest": datasets.Sequence(datasets.Value("string")),
                "path": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=8_000),
            }
        )
        return datasets.DatasetInfo(
            version=self.config.version,
            description=self.config.description,
            license="CC-BY-4.0",
            citation=_CITATION,
            features=features,
        )

    def _split_generators(self, dl_manager):
        """Download per-language transcript TSVs and the shared audio zip.

        Only a TEST split exists for EVI.
        """
        langs = self.config.languages
        lang2text_urls = {lang: _TEXT_URL[lang] for lang in langs}
        lang2text_paths = dl_manager.download_and_extract(lang2text_urls)
        audio_data_path = dl_manager.download_and_extract(_AUDIO_DATA_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "audio_data_path": audio_data_path,
                    "text_paths": lang2text_paths,
                },
            )
        ]

    def _generate_examples(self, audio_data_path, text_paths):
        """Yield (key, example) pairs, one per dialogue turn.

        Audio files live at audios/{lang}/{dialogue_id}/{turn}.wav inside
        the extracted archive; the relative path doubles as the example key.
        """
        for lang, text_path in text_paths.items():
            with open(text_path, encoding="utf-8") as fin:
                reader = csv.DictReader(
                    fin, delimiter="\t", skipinitialspace=True
                )
                for i, dictrow in enumerate(reader):
                    # NOTE(review): hard cap at 199 rows per language looks
                    # like debug leftover — confirm before release.
                    if i == 199:
                        break
                    dialogue_id = dictrow["dialogue_id"]
                    turn_id = dictrow["turn_num"]
                    file_path = os.path.join(
                        "audios",
                        lang,
                        dialogue_id,
                        f'{turn_id}.wav'
                    )
                    full_path = Path(audio_data_path) / file_path

                    yield file_path, {
                        "language": lang,
                        "dialogue_id": dialogue_id,
                        "speaker_id": dictrow["speaker_id"],
                        "turn_id": turn_id,
                        # TODO(review): this row is a context line hidden by
                        # the diff hunk — confirm the TSV column name.
                        "target_profile_id": dictrow["target_profile_id"],
                        "asr_transcription": dictrow["transcription"],
                        "asr_nbest": json.loads(dictrow["nbest"]),
                        "path": file_path,
                        "audio": str(full_path),
                    }