init

- download_audio.py +48 -8
- util.py +0 -42
download_audio.py
CHANGED
@@ -1,13 +1,55 @@
 import os
+import tarfile
+import zipfile
+import gzip
+import traceback
+import requests
 from os.path import join as p_join
-import pandas as pd
 from tqdm import tqdm
-from util import wget
+from multiprocessing import Pool
+from typing import Optional
+from urllib3.connection import ConnectionError
+
+import pandas as pd
 
 
 url_metadata_s2s = "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz"
 url_metadata_s2t = "https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
 cache_dir_root = "./download"
+n_pool = 8
+
+
+def wget(url: str, cache_dir: str, filename: Optional[str] = None):
+    os.makedirs(cache_dir, exist_ok=True)
+    filename = os.path.basename(url) if not filename else filename
+    output_file = p_join(cache_dir, filename)
+    try:
+        with open(output_file, "wb") as f:
+            r = requests.get(url)
+            f.write(r.content)
+    except (ConnectionError, KeyboardInterrupt):
+        traceback.print_exc()
+        os.remove(output_file)
+        return False
+
+    if output_file.endswith('.tar.gz') or output_file.endswith('.tgz') or output_file.endswith('.tar'):
+        if output_file.endswith('.tar'):
+            tar = tarfile.open(output_file)
+        else:
+            tar = tarfile.open(output_file, "r:gz")
+        tar.extractall(cache_dir)
+        tar.close()
+        os.remove(output_file)
+    elif output_file.endswith('.gz'):
+        with gzip.open(output_file, 'rb') as f:
+            with open(output_file.replace('.gz', ''), 'wb') as f_write:
+                f_write.write(f.read())
+        os.remove(output_file)
+    elif output_file.endswith('.zip'):
+        with zipfile.ZipFile(output_file, 'r') as zip_ref:
+            zip_ref.extractall(cache_dir)
+        os.remove(output_file)
+    return True
 
 
 def get_metadata(url: str):
@@ -17,7 +59,6 @@ def get_metadata(url: str):
     assert wget(url, cache_dir=cache_dir)
     df = pd.read_csv(p_join(cache_dir, filename), sep=r'[\t\s]', header=None)[[0, 2, 6, 9, 10, 11, 12]]
     df.columns = ["id", "url", "text_lid_score", "laser_score", "direction", "side", "line_no"]
-    print(f"load metadata: {filename}, ({len(df)} rows)")
     return df
 
 
@@ -30,11 +71,10 @@ def get_audio(url: str, filename: str):
 
 def process_dataset(url_metadata):
     df_metadata = get_metadata(url_metadata)
-
-    for _,
-
-
-    print(f"missing files: {num_missing_files}/{len(df_metadata)}")
+    print(f"load metadata: {url_metadata}, ({len(df_metadata)} rows)")
+    inputs = [(r, f"{r['direction']}.{r['side']}.{os.path.basename(r['url'])}") for _, r in df_metadata.iterrows()]
+    with Pool(n_pool) as pool:
+        pool.starmap(get_audio, tqdm(inputs, total=len(df_metadata)))
 
 
 if __name__ == '__main__':
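Two notes on the new code, since neither detail is visible in the hunks above.

First, `pd.read_csv` with the multi-character regex separator `r'[\t\s]'` silently falls back to pandas' slower Python parser; passing `engine='python'` makes that explicit and silences the `ParserWarning`:

```python
# Equivalent read with the parser engine made explicit (illustrative only).
df = pd.read_csv(p_join(cache_dir, filename), sep=r'[\t\s]',
                 header=None, engine='python')[[0, 2, 6, 9, 10, 11, 12]]
```

Second, the body of `get_audio` sits outside the shown hunks. The hunk context still carries the old `(url, filename)` signature, but the tuples built in `process_dataset` now pass a whole metadata row plus a cache filename, so a minimal sketch consistent with the new call site (hypothetical, not the committed body) is:

```python
def get_audio(row, filename: str):
    # Hypothetical sketch: fetch one audio file referenced by a metadata row.
    # `row` is a pandas Series with at least the "url" column selected in
    # get_metadata; `filename` is the "<direction>.<side>.<basename>" cache
    # name built in process_dataset. Files already on disk are skipped.
    cache_dir = p_join(cache_dir_root, "audio")  # assumed cache location
    if not os.path.exists(p_join(cache_dir, filename)):
        return wget(row["url"], cache_dir=cache_dir, filename=filename)
    return True
```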
util.py
DELETED
@@ -1,42 +0,0 @@
-import os
-import tarfile
-import zipfile
-import gzip
-import traceback
-import requests
-from os.path import join as p_join
-from typing import Optional
-from urllib3.connection import ConnectionError
-
-
-def wget(url: str, cache_dir: str, filename: Optional[str] = None):
-    os.makedirs(cache_dir, exist_ok=True)
-    filename = os.path.basename(url) if not filename else filename
-    output_file = p_join(cache_dir, filename)
-    try:
-        with open(output_file, "wb") as f:
-            r = requests.get(url)
-            f.write(r.content)
-    except ConnectionError or KeyboardInterrupt:
-        traceback.print_exc()
-        os.remove(output_file)
-        return False
-
-    if output_file.endswith('.tar.gz') or output_file.endswith('.tgz') or output_file.endswith('.tar'):
-        if output_file.endswith('.tar'):
-            tar = tarfile.open(output_file)
-        else:
-            tar = tarfile.open(output_file, "r:gz")
-        tar.extractall(cache_dir)
-        tar.close()
-        os.remove(output_file)
-    elif output_file.endswith('.gz'):
-        with gzip.open(output_file, 'rb') as f:
-            with open(output_file.replace('.gz', ''), 'wb') as f_write:
-                f_write.write(f.read())
-        os.remove(output_file)
-    elif output_file.endswith('.zip'):
-        with zipfile.ZipFile(output_file, 'r') as zip_ref:
-            zip_ref.extractall(cache_dir)
-        os.remove(output_file)
-    return True
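The deleted helper now lives inside download_audio.py, so call sites keep the same interface. Fetching and unpacking one of the metadata files, for instance (the "metadata" cache subdirectory here is an assumed value; the one actually used inside `get_metadata` is outside the shown hunks):

```python
# wget() downloads into cache_dir, then unpacks .tar/.tgz/.tar.gz, .gz and
# .zip archives in place and removes the archive afterwards; it returns
# False only if the request raises ConnectionError or is interrupted.
ok = wget(url_metadata_s2s, cache_dir=p_join(cache_dir_root, "metadata"))
assert ok  # the decompressed enA-jaA metadata .tsv is now in the cache
```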
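Neither file shows the body under `if __name__ == '__main__':`. Given the two metadata URLs defined at module top, a plausible entry point (hypothetical, not part of this commit) would be:

```python
if __name__ == '__main__':
    # Hypothetical driver: fetch the audio referenced by both releases,
    # speech-to-speech (enA-jaA) and speech-to-text (enA-jpn).
    process_dataset(url_metadata_s2s)
    process_dataset(url_metadata_s2t)
```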