asahi417 committed on
Commit f4b03a9 · 1 Parent(s): f0dde66
Files changed (1)
  1. download_audio.py +50 -36
download_audio.py CHANGED
@@ -1,6 +1,7 @@
 """
 https://stackoverflow.com/questions/31353244/how-to-config-wget-to-retry-more-than-20
 """
+import json
 import os
 import tarfile
 import zipfile
@@ -9,20 +10,23 @@ import subprocess
 from os.path import join as p_join
 from tqdm import tqdm
 from multiprocessing import Pool
-from typing import Optional
+from typing import Optional, List

 import pandas as pd

-
-url_metadata_s2s = "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz"
-url_metadata_s2t = "https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
-cache_dir_root = "download"
-cache_dir_audio = p_join(cache_dir_root, "audio")
-cache_dir_metadata = p_join(cache_dir_root, "meta")
+# dataset config
+url_metadata_dict = {
+    "enA-jaA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz",
+    "enA-jpn": "https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
+}
+direction = os.getenv("DIRECTION", "enA-jaA")
+sides = set(direction.split("-"))
+# processor config
 n_pool = int(os.getenv("N_POOL", 8))
 wget_max_retry = os.getenv("MAX_RETRY", "1")
 wget_timeout = os.getenv("TIMEOUT", "20")
-data = os.getenv("DATA", "s2s")
+line_no_start = int(os.getenv("LINE_NO_START", 0))
+line_no_end = int(os.getenv("LINE_NO_END", 500000))


 def wget(url: str, cache_dir: str, filename: Optional[str] = None):
@@ -52,44 +56,54 @@ def wget(url: str, cache_dir: str, filename: Optional[str] = None):
     return True


-def get_metadata(url: str):
-    filename = os.path.basename(url).replace(".gz", "")
-    if not os.path.exists(filename):
-        assert wget(url, cache_dir=cache_dir_metadata)
-    df = pd.read_csv(p_join(cache_dir_metadata, filename), sep=r'[\t\s]', header=None)[[0, 2, 6, 9, 10, 11, 12]]
+def get_metadata():
+    url_metadata = url_metadata_dict[direction]
+    meta_data_filename = os.path.basename(url_metadata).replace(".gz", "")
+    cache_dir_metadata = p_join("download", "meta")
+    if not os.path.exists(p_join(cache_dir_metadata, meta_data_filename)):
+        assert wget(url_metadata, cache_dir=cache_dir_metadata)
+    df = pd.read_csv(p_join(cache_dir_metadata, meta_data_filename), sep=r'[\t\s]', header=None)[[0, 2, 6, 9, 10, 11, 12]]
     df.columns = ["id", "url", "text_lid_score", "laser_score", "direction", "side", "line_no"]
-    return df
+    assert len(df["direction"].unique()) == 1
+    df.pop("direction")
+    return df.sort_values(by=["line_no", "side"])


-def get_audio(url: str, filename: str):
-    if not os.path.exists(p_join(cache_dir_audio, filename)):
-        return wget(url, filename=filename, cache_dir=cache_dir_audio)
+def get_audio(dataframe: pd.DataFrame):
+    cache_dir_audio = p_join("download", "audio", direction)
+    cache_dir_feature = p_join("download", "feature", direction)
+    os.makedirs(cache_dir_feature, exist_ok=True)
+    features = {"line_no": dataframe.pop('line_no').values[0]}
+    for side, df in dataframe.groupby("side"):
+        df.pop("side")
+        features.update({f"{side}.{k}": v for k, v in df.iloc[0].to_dict().items()})
+        features[f"{side}.path"] = p_join(cache_dir_audio, os.path.basename(features[f"{side}.url"]))
+        if not os.path.exists(features[f"{side}.path"]):
+            if not wget(features[f"{side}.url"], filename=features[f"{side}.path"], cache_dir=cache_dir_audio):
+                return False
+    with open(p_join(cache_dir_feature, f"{features['line_no']}.json"), "w") as f:
+        json.dump(features, f)
     return True


-def process_dataset(url_metadata):
-    df_metadata = get_metadata(url_metadata)
-    print(f"load metadata: {url_metadata}, ({len(df_metadata)} rows)")
-    inputs = [(
-        r['url'], f"{r['id']}.{r['direction']}.{r['side']}.{os.path.basename(r['url'])}"
-    ) for _, r in df_metadata.iterrows()]
-    inputs = [x for x in inputs if not os.path.exists(p_join(cache_dir_audio, x[1]))]
-    print(f"{len(inputs)} urls to download")
+def process_dataset():
+    df_metadata = get_metadata()
+    print(f"metadata: {len(df_metadata)}")
+    inputs = [g for line_no, g in df_metadata.groupby("line_no") if line_no_start <= line_no < line_no_end]
+    print(f"filtered unique lines: {len(inputs)}")
+    inputs = [g for g in inputs if len(g) == 2]
+    print(f"removed != 2: {len(inputs)}")
+    inputs = [g for g in inputs if len(g["side"].unique()) == 2 and set(g["side"].unique()) == sides]
+    print(f"removed side != 2: {len(inputs)}")
     if n_pool == 1:
-        for url, filename in tqdm(inputs, total=len(inputs)):
-            flag = get_audio(url, filename)
-            if not flag:
-                print(f"failed:\n{url}")
+        for g in tqdm(inputs, total=len(inputs)):
+            if not get_audio(g):
+                print(f"failed:\n{g['url']}")
     else:
         with Pool(n_pool) as pool:
-            pool.starmap(get_audio, tqdm(inputs, total=len(inputs)))
+            pool.map(get_audio, tqdm(inputs, total=len(inputs)))


 if __name__ == '__main__':
-    if data == "s2s":
-        process_dataset(url_metadata_s2s)
-    elif data == "s2t":
-        process_dataset(url_metadata_s2t)
-    else:
-        raise ValueError(f"unknown data type {data}")
+    process_dataset()

 
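For reference, a successful call to get_audio now leaves one JSON record per line_no under download/feature/<direction>. A minimal sketch of the record shape for the enA-jaA direction, where every value below is invented for illustration:

# Illustrative feature record as written by json.dump in get_audio;
# keys follow the f"{side}.{column}" naming in the code, values are made up.
example_feature = {
    "line_no": 42,
    "enA.id": 1234,
    "enA.url": "https://dl.fbaipublicfiles.com/seamless/data/audio/en_000042.wav",
    "enA.text_lid_score": 0.99,
    "enA.laser_score": 1.07,
    "enA.path": "download/audio/enA-jaA/en_000042.wav",
    "jaA.id": 5678,
    "jaA.url": "https://dl.fbaipublicfiles.com/seamless/data/audio/ja_000042.wav",
    "jaA.text_lid_score": 0.98,
    "jaA.laser_score": 1.07,
    "jaA.path": "download/audio/enA-jaA/ja_000042.wav",
}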
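The filters in process_dataset keep only line_no groups that form a complete translation pair. A self-contained toy run of the same groupby logic (column values invented) shows the effect:

# Toy demonstration of the pairing filters in process_dataset:
# a line_no is kept only when exactly two rows cover both sides.
import pandas as pd

toy = pd.DataFrame({
    "side": ["enA", "jaA", "enA"],
    "url": ["u0.en", "u0.ja", "u1.en"],
    "line_no": [0, 0, 1],
})
inputs = [g for line_no, g in toy.groupby("line_no")]
inputs = [g for g in inputs if len(g) == 2]
inputs = [g for g in inputs if set(g["side"].unique()) == {"enA", "jaA"}]
print(len(inputs))  # 1: line_no 0 is a full pair, line_no 1 lacks the jaA side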
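Since DIRECTION, LINE_NO_START, and LINE_NO_END are read from the environment, the download can be sharded by line-number range. A hypothetical driver, not part of the commit (the 2,000,000 total is an assumed metadata size):

# Hypothetical driver: runs download_audio.py over 500k-line shards.
import os
import subprocess

for start in range(0, 2_000_000, 500_000):  # assumed upper bound on line_no
    env = dict(
        os.environ,
        DIRECTION="enA-jaA",
        LINE_NO_START=str(start),
        LINE_NO_END=str(start + 500_000),
    )
    subprocess.run(["python", "download_audio.py"], env=env, check=True)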