Lennart Keller committed on
Commit
97a925f
1 Parent(s): a4d6d07
Files changed (4) hide show
  1. README.md +34 -1
  2. create_release.py +45 -21
  3. data.zip +2 -2
  4. tables.zip +2 -2
README.md CHANGED
@@ -46,4 +46,37 @@ dataset = load_dataset(
46
  split="train",
47
  trust_remote_code=True
48
  )
49
- ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
  split="train",
47
  trust_remote_code=True
48
  )
49
+ ```
50
+
51
+ ## Overview
52
+
53
+ | | Language | alpha3 | train | test | dev | total |
54
+ |---:|:-------------------------|:-----------|--------:|-------:|------:|--------:|
55
+ | 0 | Vietnamese | vie | 856 | 111 | 106 | 1073 |
56
+ | 1 | French | fra | 851 | 108 | 106 | 1065 |
57
+ | 2 | Russian | rus | 822 | 107 | 102 | 1031 |
58
+ | 3 | Ukrainian | ukr | 751 | 97 | 89 | 937 |
59
+ | 4 | Kannada | kan | 740 | 100 | 89 | 929 |
60
+ | 5 | Gujarati | guj | 740 | 100 | 89 | 929 |
61
+ | 6 | Yoruba | yor | 739 | 100 | 88 | 927 |
62
+ | 7 | Punjabi | pan | 739 | 100 | 88 | 927 |
63
+ | 8 | Naga Pidgin | nag | 739 | 100 | 89 | 928 |
64
+ | 9 | Luo (Kenya and Tanzania) | luo | 738 | 100 | 88 | 926 |
65
+ | 10 | Tamil | tam | 733 | 100 | 89 | 922 |
66
+ | 11 | Marathi | mar | 733 | 99 | 87 | 919 |
67
+ | 12 | Assamese | asm | 732 | 98 | 88 | 918 |
68
+ | 13 | Haryanvi | bgc | 729 | 100 | 87 | 916 |
69
+ | 14 | Bhattiyali | bht | 726 | 98 | 88 | 912 |
70
+ | 15 | Malayalam | mal | 724 | 100 | 89 | 913 |
71
+ | 16 | Ewe | ewe | 724 | 98 | 86 | 908 |
72
+ | 17 | Central Kurdish | ckb | 723 | 93 | 82 | 898 |
73
+ | 18 | Telugu | tel | 722 | 96 | 85 | 903 |
74
+ | 19 | Igbo | ibo | 720 | 96 | 87 | 903 |
75
+ | 20 | Pengo | peg | 707 | 94 | 86 | 887 |
76
+ | 21 | Ndebele | nde | 699 | 88 | 85 | 872 |
77
+ | 22 | Asante Twi | tw-asante | 693 | 92 | 88 | 873 |
78
+ | 23 | Akuapem Twi | tw-akuapem | 692 | 91 | 84 | 867 |
79
+ | 24 | Urdu | urd | 674 | 95 | 80 | 849 |
80
+ | 25 | Nahali | nlx | 672 | 92 | 85 | 849 |
81
+ | 26 | English | eng | 569 | 81 | 74 | 724 |
82
+ | 27 | Lingala | lin | 560 | 75 | 61 | 696 |
create_release.py CHANGED
@@ -9,6 +9,8 @@ import jinja2
9
  import pandas as pd
10
  import uroman as ur
11
  from unidecode import unidecode
 
 
12
 
13
  # Define some variables
14
  NAME = "SpeechTaxi"
@@ -277,6 +279,19 @@ def read_bible_scrape_instance(audio_file: str | Path) -> dict:
277
  return data
278
 
279
 
 
 
 
 
 
 
 
 
 
 
 
 
 
280
  # Main logic
281
  if __name__ == "__main__":
282
  dataset_root = SAVE_DIR / NAME
@@ -291,7 +306,7 @@ if __name__ == "__main__":
291
  data_dir.mkdir(parents=True)
292
  table_dir = dataset_root / "tables"
293
  table_dir.mkdir(parents=True)
294
-
295
  # Copy this script to dataset dir for reproducibility
296
  copy_self(dataset_root)
297
 
@@ -303,7 +318,9 @@ if __name__ == "__main__":
303
 
304
  # Write code file
305
  success_languages = []
306
- for alignment_table in language_tables:
 
 
307
  df = pd.read_table(alignment_table)
308
  try:
309
  df = filter_instances(df, alignment_table)
@@ -317,10 +334,19 @@ if __name__ == "__main__":
317
  for _, row in df.iterrows():
318
  if alignment_table.stem not in MASS_LANGUAGES:
319
  audio_file = rewrite_path(Path(row["audio_path"]))
 
 
 
320
  copied_audio_file = copy_bible_scrape_audio(audio_file, root=data_dir)
321
  english_text = row["en_text"]
322
- data = read_bible_scrape_instance(audio_file)
323
- # copied_audio = copy_file(audio_file, data_dir)
 
 
 
 
 
 
324
  data = (
325
  {"verse_ref": data.pop("verse_ref")}
326
  | {"text_en": english_text}
@@ -331,8 +357,18 @@ if __name__ == "__main__":
331
  final_table_data.append(data)
332
  elif INCLUDE_MASS_LANGUAGES:
333
  audio_file = rewrite_mass_path(Path(row["audio_path"]))
 
 
 
334
  copied_audio_file = copy_mass_audio(audio_file, root=data_dir)
335
- data = read_mass_instance(audio_file)
 
 
 
 
 
 
 
336
  data = data | {
337
  "split": row["split"],
338
  "label": row["label"],
@@ -340,22 +376,10 @@ if __name__ == "__main__":
340
  "audio": copied_audio_file.relative_to(data_dir).as_posix(),
341
  }
342
  final_table_data.append(data)
343
- fina_table = pd.DataFrame.from_records(final_table_data)
344
- # Reorder columns
345
- # cols = [
346
- # "verse_ref",
347
- # "text_en",
348
- # "split",
349
- # "label",
350
- # "transcription",
351
- # "transcription_romanized",
352
- # "transription_mms-zeroshot-300m",
353
- # "transription_whisper-large-v3",
354
- # "transription_mms-1b-all",
355
- # "audio",
356
- # ]
357
- # fina_table = fina_table[cols]
358
- fina_table.to_csv(table_dir / alignment_table.name, index=False, sep="\t")
359
 
360
  # Now render dataset loading script and write to dataset dir
361
  code_file = dataset_root / f"{NAME}.py"
 
9
  import pandas as pd
10
  import uroman as ur
11
  from unidecode import unidecode
12
+ from lnn.utils import load_audio
13
+ from tqdm.auto import tqdm
14
 
15
  # Define some variables
16
  NAME = "SpeechTaxi"
 
279
  return data
280
 
281
 
282
+ def is_audio_valid(audio_path: str | Path):
283
+ audio_path = Path(audio_path)
284
+ if not audio_path.exists():
285
+ return False
286
+ try:
287
+ wv, sr = load_audio(audio_path, return_tensor="torch")
288
+ wv = wv[0].reshape(-1)
289
+ except Exception:
290
+ return False
291
+ # We need *at least* one second of audio
292
+ return wv.numel() >= sr
293
+
294
+
295
  # Main logic
296
  if __name__ == "__main__":
297
  dataset_root = SAVE_DIR / NAME
 
306
  data_dir.mkdir(parents=True)
307
  table_dir = dataset_root / "tables"
308
  table_dir.mkdir(parents=True)
309
+
310
  # Copy this script to dataset dir for reproducibility
311
  copy_self(dataset_root)
312
 
 
318
 
319
  # Write code file
320
  success_languages = []
321
+ pbar = tqdm(language_tables, desc="Creating dataset...")
322
+ invalid_counter = 0
323
+ for alignment_table in pbar:
324
  df = pd.read_table(alignment_table)
325
  try:
326
  df = filter_instances(df, alignment_table)
 
334
  for _, row in df.iterrows():
335
  if alignment_table.stem not in MASS_LANGUAGES:
336
  audio_file = rewrite_path(Path(row["audio_path"]))
337
+ if not is_audio_valid(audio_file):
338
+ invalid_counter += 1
339
+ continue
340
  copied_audio_file = copy_bible_scrape_audio(audio_file, root=data_dir)
341
  english_text = row["en_text"]
342
+ try:
343
+ data = read_bible_scrape_instance(audio_file)
344
+ except Exception as e:
345
+ print("Error reading file", audio_file)
346
+ print(e)
347
+ print("_"*30)
348
+ continue
349
+ # Files that can't be read have zero alignment scores anyways so in most cases this isn't an issue
350
  data = (
351
  {"verse_ref": data.pop("verse_ref")}
352
  | {"text_en": english_text}
 
357
  final_table_data.append(data)
358
  elif INCLUDE_MASS_LANGUAGES:
359
  audio_file = rewrite_mass_path(Path(row["audio_path"]))
360
+ if not is_audio_valid(audio_file):
361
+ invalid_counter += 1
362
+ continue
363
  copied_audio_file = copy_mass_audio(audio_file, root=data_dir)
364
+ try:
365
+ data = read_mass_instance(audio_file)
366
+ except Exception as e:
367
+ print("Error reading file", audio_file)
368
+ print(e)
369
+ print("_"*30)
370
+ continue
371
+ # Files that can't be read have zero alignment scores anyways so in most cases this isn't an issue
372
  data = data | {
373
  "split": row["split"],
374
  "label": row["label"],
 
376
  "audio": copied_audio_file.relative_to(data_dir).as_posix(),
377
  }
378
  final_table_data.append(data)
379
+ pbar.set_description(f"{str(alignment_table)} | Total invalid: {invalid_counter}")
380
+
381
+ final_table = pd.DataFrame.from_records(final_table_data)
382
+ final_table.to_csv(table_dir / alignment_table.name, index=False, sep="\t")
 
 
 
 
 
 
 
 
 
 
 
 
383
 
384
  # Now render dataset loading script and write to dataset dir
385
  code_file = dataset_root / f"{NAME}.py"
data.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:36c0b8e4b56394e00997c7dd7af438c8eabc9decac6bf20dea1a41531fa34a76
3
- size 2877483349
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e0199424abee149174d9ea5eb0a12b4828ff21d1ffd6670cbd4f2244e43e305e
3
+ size 2905453052
tables.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1ba1889181bf4f12fac30083d650c81347e8fe265547f21ef1f9ff0887d866f2
3
- size 7457350
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b4ac7fc8d111d463295402b1469117ef74ba5bdfa7ddc5b9a2fae3fef2f4306
3
+ size 7586373