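# Downloader for the SeamlessAlign speech-to-speech data (enA-jaA / enA-jpn): fetches the
# public metadata TSV, downloads the audio file behind each side of every aligned pair into
# download/audio/<direction>/, and writes one metadata JSON per pair into
# download/feature/<direction>/. Behaviour is configured through the environment variables
# DIRECTION, N_POOL, MAX_RETRY, TIMEOUT, LINE_NO_START and LINE_NO_END.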
import json
import os
import tarfile
import zipfile
import gzip
import subprocess
from os.path import join as p_join
from tqdm import tqdm
from multiprocessing import Pool
from typing import Optional
import pandas as pd
# dataset config
url_metadata_dict = {
    "enA-jaA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz",
    "enA-jpn": "https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz"
}
direction = os.getenv("DIRECTION", "enA-jaA")
sides = set(direction.split("-"))
cache_dir_audio = p_join("download", "audio", direction)
cache_dir_feature = p_join("download", "feature", direction)
os.makedirs(cache_dir_audio, exist_ok=True)
os.makedirs(cache_dir_feature, exist_ok=True)
# processor config
n_pool = int(os.getenv("N_POOL", 8))
wget_max_retry = os.getenv("MAX_RETRY", "1")
wget_timeout = os.getenv("TIMEOUT", "20")
line_no_start = int(os.getenv("LINE_NO_START", 0))
line_no_end = int(os.getenv("LINE_NO_END", 10000))
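# Example invocation (a sketch; the script filename "download_seamless_audio.py" is an
# assumption, substitute the actual file name):
#   DIRECTION=enA-jaA N_POOL=8 LINE_NO_START=0 LINE_NO_END=1000 python download_seamless_audio.py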
def wget(url: str, output_file: Optional[str] = None):
    """Fetch `url` into `output_file` via the wget CLI, then unpack .tar/.tar.gz/.gz/.zip archives in place."""
    os.makedirs(os.path.dirname(output_file), exist_ok=True)
    subprocess.run(["wget", url, "-O", output_file, "--tries", wget_max_retry, "--timeout", wget_timeout])
    if not os.path.exists(output_file):
        return False
    if output_file.endswith('.tar.gz') or output_file.endswith('.tgz') or output_file.endswith('.tar'):
        if output_file.endswith('.tar'):
            tar = tarfile.open(output_file)
        else:
            tar = tarfile.open(output_file, "r:gz")
        tar.extractall(os.path.dirname(output_file))
        tar.close()
        os.remove(output_file)
    elif output_file.endswith('.gz'):
        with gzip.open(output_file, 'rb') as f:
            with open(output_file.replace('.gz', ''), 'wb') as f_write:
                f_write.write(f.read())
        os.remove(output_file)
    elif output_file.endswith('.zip'):
        with zipfile.ZipFile(output_file, 'r') as zip_ref:
            zip_ref.extractall()
        os.remove(output_file)
    return True

def get_metadata():
    """Download the metadata TSV for the configured direction and return it sorted by (line_no, side)."""
    url_metadata = url_metadata_dict[direction]
    meta_data_filename = os.path.basename(url_metadata)
    meta_data_path = p_join("download", "meta", meta_data_filename)
    if not os.path.exists(meta_data_path.replace(".gz", "")):
        assert wget(url_metadata, output_file=meta_data_path)
    if direction == "enA-jaA":
        df = pd.read_csv(meta_data_path.replace(".gz", ""), sep=r'[\t\s]', header=None)
        df = df[[0, 2, 3, 4, 9, 10, 11, 12]]
        df.columns = ["id", "url", "duration_start", "duration_end", "laser_score", "direction", "side", "line_no"]
    else:
        raise NotImplementedError("metadata parsing is only implemented for enA-jaA")
        # df = pd.read_csv(meta_data_path.replace(".gz", ""), sep='\t', header=None)[[0, 2, 4, 6, 9, 10, 11, 10]]
        # df.columns = ["id", "url", "duration", "text_lid_score", "laser_score", "direction", "side", "line_no"]
    assert len(df["direction"].unique()) == 1
    df.pop("direction")
    return df.sort_values(by=["line_no", "side"])

def to_json_serializable(val):
    # cast numpy scalar types down to plain Python types so json.dump can handle them
    if "float" in str(type(val)):
        return float(val)
    if "int" in str(type(val)):
        return int(val)
    return str(val)

def get_audio(dataframe: pd.DataFrame):
    """Download the audio file for each side of one aligned pair and dump its metadata as JSON."""
    features = {"line_no": int(dataframe.pop('line_no').values[0])}
    for side, df in dataframe.groupby("side"):
        df.pop("side")
        features.update({f"{side}.{k}": to_json_serializable(v) for k, v in df.iloc[0].to_dict().items()})
        features[f"{side}.path"] = str(p_join(cache_dir_audio, os.path.basename(features[f"{side}.url"])))
        if not os.path.exists(features[f"{side}.path"]):
            if not wget(features[f"{side}.url"], output_file=features[f"{side}.path"]):
                return False
    with open(p_join(cache_dir_feature, f'{features["line_no"]}.json'), "w") as f:
        json.dump(features, f)
    return True

def process_dataset():
    """Select the configured line range, keep complete two-side pairs, and download them (in parallel if N_POOL > 1)."""
    df_metadata = get_metadata()
    print(f"metadata: {len(df_metadata)}, {line_no_start} --> {line_no_end}")
    inputs = [g for line_no, g in df_metadata.groupby("line_no") if line_no_start <= line_no < line_no_end]
    print(f"filtered unique lines: {len(inputs)}")
    inputs = [g for g in inputs if len(g) == 2]
    print(f"removed != 2: {len(inputs)}")
    inputs = [g for g in inputs if len(g["side"].unique()) == 2 and set(g["side"].unique()) == sides]
    print(f"removed side != 2: {len(inputs)}")
    if n_pool == 1:
        for g in tqdm(inputs, total=len(inputs)):
            if not get_audio(g):
                print(f"failed:\n{g['url']}")
    else:
        with Pool(n_pool) as pool:
            pool.map(get_audio, tqdm(inputs, total=len(inputs)))

if __name__ == '__main__':
    process_dataset()