asahi417 committed
Commit 54f280d
1 parent: eac7224
Files changed (5)
  1. delete_audio.py +0 -15
  2. fetch_dataset_s2s.py +206 -0
  3. main.sh +0 -256
  4. main_s2s.sh +101 -0
  5. main_s2t.sh +60 -0
delete_audio.py DELETED
@@ -1,15 +0,0 @@
- import os
- from os.path import join as p_join
- from glob import glob
- from tqdm import tqdm
-
- direction = os.getenv("DIRECTION", "enA-jaA")
- cache_dir_audio = p_join("download", "audio", direction)
- cache_dir_feature = p_join("download", "feature", direction)
- line_no_start = int(os.getenv("LINE_NO_START", 0))
- line_no_end = int(os.getenv("LINE_NO_END", 10000))
- for i in tqdm(range(line_no_start, line_no_end), total=line_no_end-line_no_start):
-     for audio_file in glob(p_join(cache_dir_audio, "*", f"{i}.*")):
-         os.remove(audio_file)
-     if os.path.exists(p_join(cache_dir_feature, f"{i}.json")):
-         os.remove(p_join(cache_dir_feature, f"{i}.json"))
fetch_dataset_s2s.py ADDED
@@ -0,0 +1,206 @@
+ import json
+ import os
+ import tarfile
+ import zipfile
+ import gzip
+ import subprocess
+ from os.path import join as p_join
+ from math import ceil, floor
+ from tqdm import tqdm
+ from multiprocessing import Pool
+ from typing import Optional, Dict
+ from glob import glob
+ # import librosa
+
+ import pandas as pd
+ import soundfile as sf
+ from datasets import Dataset, Audio, DatasetDict
+
+ audio_loader = Audio()
+ # dataset config
+ url_metadata_dict = {
+     "enA-jaA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-jaA.tsv.gz",
+     "enA-zhA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-zhA.tsv.gz",
+     "enA-viA": "https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.enA-viA.tsv.gz",
+ }
+ direction = os.getenv("DIRECTION", "enA-jaA")
+ if direction not in url_metadata_dict:
+     a, b = direction.split("-")
+     url_metadata_dict[direction] = f"https://dl.fbaipublicfiles.com/seamless/data/seamless_align_nov2023_extension/seamless.dataset.metadata.public.{a}-{b}.tsv.gz"
+ sides = set(direction.split("-"))
+ cache_dir_audio = p_join("download", "audio", direction)
+ cache_dir_feature = p_join("download", "feature", direction)
+ os.makedirs(cache_dir_feature, exist_ok=True)
+ for s in sides:
+     os.makedirs(p_join(cache_dir_audio, s), exist_ok=True)
+ # processor config
+ n_pool = int(os.getenv("N_POOL", 1))
+ wget_max_retry = os.getenv("MAX_RETRY", "2")
+ wget_timeout = os.getenv("TIMEOUT", "20")
+ line_no_start = int(os.getenv("LINE_NO_START", 0))
+ line_no_end = int(os.getenv("LINE_NO_END", 10000))
+ dataset_id = os.getenv("DATASET_ID", 0)
+ hf_org = os.getenv("HF_ORG", "asahi417")
+ hf_dataset = f"seamless-align-{direction}"
+ skip_download = bool(int(os.getenv("SKIP_DOWNLOAD", 0)))
+ sampling_rate = 16000  # seamless-align aligns audio at 16 kHz
+
+
+ def wget(url: str, output_file: Optional[str] = None):
+     os.makedirs(os.path.dirname(output_file), exist_ok=True)
+     subprocess.run(["wget", url, "-O", output_file, "--tries", wget_max_retry, "--timeout", wget_timeout])
+     if not os.path.exists(output_file):
+         return False
+     if output_file.endswith('.tar.gz') or output_file.endswith('.tgz') or output_file.endswith('.tar'):
+         if output_file.endswith('.tar'):
+             tar = tarfile.open(output_file)
+         else:
+             tar = tarfile.open(output_file, "r:gz")
+         tar.extractall(os.path.dirname(output_file))
+         tar.close()
+         os.remove(output_file)
+     elif output_file.endswith('.gz'):
+         with gzip.open(output_file, 'rb') as f:
+             with open(output_file.replace('.gz', ''), 'wb') as f_write:
+                 f_write.write(f.read())
+         os.remove(output_file)
+     elif output_file.endswith('.zip'):
+         with zipfile.ZipFile(output_file, 'r') as zip_ref:
+             zip_ref.extractall()
+         os.remove(output_file)
+     return True
+
+
+ def get_metadata():
+     url_metadata = url_metadata_dict[direction]
+     meta_data_filename = os.path.basename(url_metadata)
+     meta_data_path = p_join("download", "meta", meta_data_filename)
+     if not os.path.exists(meta_data_path.replace(".gz", "")):
+         assert wget(url_metadata, output_file=meta_data_path)
+     df = pd.read_csv(meta_data_path.replace(".gz", ""), sep=r'[\t\s]', header=None)
+     df = df[[0, 2, 3, 4, 9, 10, 11, 12]]
+     df.columns = ["id", "url", "duration_start", "duration_end", "laser_score", "direction", "side", "line_no"]
+     if direction == "enA-jpn":
+         df = df[df["side"] == "enA"]
+     assert len(df["direction"].unique()) == 1
+     df.pop("direction")
+     return df.sort_values(by=["line_no", "side"])
+
+
+ def to_json_serializable(val):
+     if "float" in str(type(val)):
+         return float(val)
+     if "int" in str(type(val)):
+         return int(val)
+     return str(val)
+
+
+ def cleanup(features, feature_file):
+     if os.path.exists(feature_file):
+         os.remove(feature_file)
+     for _side in sides:
+         for _unrelated_audio_file in glob(p_join(cache_dir_audio, _side, f"{features['line_no']}.*")):
+             os.remove(_unrelated_audio_file)
+     # create a dummy file so that this line can be skipped on the next run
+     with open(feature_file, "w") as f:
+         json.dump({"dummy": "dummy"}, f)
+
+
+ def get_audio(dataframe: pd.DataFrame):
+     resampler = {}  # unused; kept for the disabled librosa resampling path below
+     features = {"line_no": int(dataframe.pop('line_no').values[0])}
+     feature_file = p_join(cache_dir_feature, f'{features["line_no"]}.json')
+     for side, df in dataframe.groupby("side"):
+         df.pop("side")
+         features.update({f"{side}.{k}": to_json_serializable(v) for k, v in df.iloc[0].to_dict().items()})
+         identifier = os.path.basename(features[f"{side}.url"]).split(".")[-1]
+         features[f"{side}.path"] = str(p_join(cache_dir_audio, side, f"{features['line_no']}.{identifier}"))
+         start, end = features[f"{side}.duration_start"], features[f"{side}.duration_end"]
+         if not os.path.exists(features[f"{side}.path"]):
+             print(f"WGET {features[f'{side}.url']}")
+             flag = wget(features[f"{side}.url"], output_file=features[f"{side}.path"])
+             if not flag:
+                 print("\n#### ERROR: wget failure ####\n")
+                 cleanup(features, feature_file)
+                 return None
+             else:
+                 try:
+                     print(f"LOAD AUDIO FROM {features[f'{side}.path']}")
+                     wav, sr = sf.read(features[f"{side}.path"])
+                     print(f"wav shape:{wav.shape}")
+                     if wav.ndim > 1:
+                         wav = wav[:, 0]
+                     wav = wav[floor(start / sampling_rate * sr):ceil(end / sampling_rate * sr)]
+                     print(f"wav shape (after truncate):{wav.shape}")
+                     wav = wav[:int(end / sampling_rate * sr) + sr]
+                     print(f"SAVING: {features[f'{side}.path']}")
+                     sf.write(features[f"{side}.path"], wav, sr)
+                     # if sr != sampling_rate:
+                     #     print(f"RESAMPLING: {wav.shape} length audio")
+                     #     wav = librosa.resample(wav, orig_sr=sr, target_sr=sampling_rate)
+                     #     sf.write(features[f"{side}.path"], wav[start:end], sampling_rate)
+                 except Exception as e:
+                     print(f"\n#### ERROR ####\n {e}")
+                     cleanup(features, feature_file)
+                     return None
+     print(f"\n### SUCCESS! ###\n:{features['line_no']}")
+     with open(feature_file, "w") as f:
+         json.dump(features, f)
+     return features["line_no"]
+
+
+ def loader(feature: str) -> Dict:
+     with open(feature) as f_reader:
+         return json.load(f_reader)
+
+
+ if __name__ == '__main__':
+     if not skip_download:
+         df_metadata = get_metadata()
+         print(f"metadata: {len(df_metadata)}, {line_no_start} --> {line_no_end}")
+         inputs = [
+             g for line_no, g in df_metadata.groupby("line_no")
+             if line_no_start <= line_no < line_no_end and not os.path.exists(
+                 p_join(cache_dir_feature, f'{int(line_no)}.json')
+             )
+         ]
+         print(f"filtered unique lines: {len(inputs)}")
+         inputs = [g for g in inputs if len(g["side"].unique()) == 2 and set(g["side"].unique()) == sides]
+         print(f"removed side != 2: {len(inputs)}")
+
+         if n_pool == 1:
+             for g in tqdm(inputs, total=len(inputs)):
+                 line_no = get_audio(g)
+         else:
+             with Pool(n_pool) as pool:
+                 for line_no in pool.imap_unordered(get_audio, inputs):
+                     if line_no:
+                         print(line_no)
+
+     print("UPLOADING TO HF!!!")
+     features = [p_join(cache_dir_feature, f'{i}.json') for i in range(line_no_start, line_no_end)]
+     print(f"- raw feature: {len(features)}")
+     features = [i for i in features if os.path.exists(i)]
+     print(f"- path exists: {len(features)}")
+     features = [loader(i) for i in features]
+     features = [i for i in features if "dummy" not in i]
+     print(f"- dummy removed: {len(features)}")
+     print(f"push {len(features)} records to hub")
+     data_dict = {}
+     for side in sides:
+         data_dict.update({f"{side}.audio": [i.pop(f"{side}.path") for i in features]})
+     data_dict.update({k: [i[k] for i in features] for k in features[0].keys()})
+     audio_dataset = Dataset.from_dict(data_dict)
+     for side in sides:
+         audio_dataset = audio_dataset.cast_column(f"{side}.audio", Audio())
+     DatasetDict({"train": audio_dataset}).push_to_hub(
+         f"{hf_org}/{hf_dataset}",
+         config_name=f"subset_{dataset_id}"
+     )
+     print("clear the workspace")
+     for i in tqdm(range(line_no_start, line_no_end), total=line_no_end - line_no_start):
+         for audio_file in glob(p_join(cache_dir_audio, "*", f"{i}.*")):
+             os.remove(audio_file)
+         if os.path.exists(p_join(cache_dir_feature, f"{i}.json")):
+             os.remove(p_join(cache_dir_feature, f"{i}.json"))
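Once a shard has been pushed, it can be read back from the Hub with the datasets library. A minimal sketch, assuming the default HF_ORG (asahi417), DIRECTION=enA-jaA, and DATASET_ID=1; substitute whatever repository and subset name were actually pushed:

python -c "from datasets import load_dataset; print(load_dataset('asahi417/seamless-align-enA-jaA', 'subset_1', split='train'))"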
main.sh DELETED
@@ -1,256 +0,0 @@
- export CUDA_VISIBLE_DEVICES=0
- export CUDA_VISIBLE_DEVICES=1
- rm -rf download/audio
- rm -rf download/feature
- python -c 'n=41; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/feature/enA-jaA/*.json")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
- python -c 'n=41; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/enA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
- python -c 'n=41; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/jaA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
- python -c 'n=42; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/feature/enA-jaA/*.json")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
- python -c 'n=42; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/enA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
- python -c 'n=42; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/jaA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
- python -c 'n=51; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/feature/enA-jaA/*.json")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
- python -c 'n=51; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/enA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
- python -c 'n=51; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/jaA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
-
- python -c 'n=1; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/feature/enA-jaA/*.json")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
- python -c 'n=1; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/enA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
- python -c 'n=1; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/jaA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
- python -c 'n=2; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/feature/enA-jaA/*.json")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
- python -c 'n=2; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/enA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
- python -c 'n=2; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/jaA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
- python -c 'n=3; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/feature/enA-jaA/*.json")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
- python -c 'n=3; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/enA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
- python -c 'n=3; import os; from glob import glob; tmp = [int(os.path.basename(i).split(".")[0]) for i in glob("download/audio/enA-jaA/jaA/*")]; print(len([x for x in tmp if (n-1) * 2500 <= x < n * 2500]))'
-
- python -c 'file_name="tmp.mp3"; from datasets import Audio; a=Audio(); wav=a.decode_example({"path": file_name, "bytes": None}); print(wav)'
-
- ####################
- # enA-jaA: 718_606 #
- ####################
- # test
- export DATASET_ID=test
- export DIRECTION="enA-jaA"
- export LINE_NO_START=0
- export LINE_NO_END=10
- python download_audio.py
-
- # main
- for i in $(seq 1 144);
- do
-   export N_POOL=15
-   export DATASET_ID=${i}
-   export DIRECTION="enA-jaA"
-   export LINE_NO_START=$(((DATASET_ID-1) * 2500))
-   export LINE_NO_END=$((DATASET_ID * 2500))
-   echo ${LINE_NO_START}
-   python download_audio.py
- done
-
- ####################
- # enA-zhA: 1_289_192 #
- ####################
- # test
- export DATASET_ID=test
- export DIRECTION="enA-zhA"
- export LINE_NO_START=0
- export LINE_NO_END=10
- python download_audio.py
-
- ####################
- # enA-viA: 740_598 #
- ####################
- # test
- export DATASET_ID=test
- export DIRECTION="enA-viA"
- export LINE_NO_START=0
- export LINE_NO_END=10
- python download_audio.py
-
- ####################
- # enA-koA: 511_358 #
- ####################
- # test
- export DATASET_ID=test
- export DIRECTION="enA-koA"
- export LINE_NO_START=0
- export LINE_NO_END=10
- python download_audio.py
-
- ####################
- # enA-hiA: #
- ####################
- # test
- export DATASET_ID=test
- export DIRECTION="enA-hiA"
- export LINE_NO_START=0
- export LINE_NO_END=10
- python download_audio.py
-
- ####################
- # enA-deA: 511_358 #
- ####################
- # test
- export DATASET_ID=test
- export DIRECTION="enA-frA"
- export LINE_NO_START=0
- export LINE_NO_END=10
- python download_audio.py
-
-
- ######################
- # enA-jpn: 1_468_292 #
- ######################
- # test
- export DATASET_ID=test
- export DIRECTION="enA-jaA"
- export LINE_NO_START=0
- export LINE_NO_END=10
- python download_audio.py
-
-
- # DOWNLOAD AUDIO
- for i in $(seq 91 100);
- do
-   export N_POOL=15
-   export DATASET_ID=${i}
-   export DIRECTION="enA-jpn"
-   export LINE_NO_START=$(((DATASET_ID-1) * 2500))
-   export LINE_NO_END=$((DATASET_ID * 2500))
-   echo ${LINE_NO_START}
-   python download_audio.py
- done
-
-
- export DIRECTION="enA-jpn"
- export LINE_NO_START=0
- export LINE_NO_END=50000
- python download_audio.py
-
- export DIRECTION="enA-jpn"
- export LINE_NO_START=50000
- export LINE_NO_END=100000
- python download_audio.py
-
- export DIRECTION="enA-jpn"
- export LINE_NO_START=100000
- export LINE_NO_END=150000
- python download_audio.py
-
- export DIRECTION="enA-jpn"
- export LINE_NO_START=150000
- export LINE_NO_END=300000
- python download_audio.py
-
- export DIRECTION="enA-jpn"
- export LINE_NO_START=300000
- export LINE_NO_END=360000
- python download_audio.py
-
-
- # FILTER AUDIO
- export DIRECTION="enA-jpn"
- export DIRECTION_SPEECH="enA"
- export LINE_NO_START=0
- export LINE_NO_END=25000
- python filter_audio.py
-
- export DIRECTION="enA-jpn"
- export DIRECTION_SPEECH="enA"
- export LINE_NO_START=25000
- export LINE_NO_END=50000
- python filter_audio.py
-
- export DIRECTION="enA-jpn"
- export DIRECTION_SPEECH="enA"
- export LINE_NO_START=50000
- export LINE_NO_END=75000
- python filter_audio.py
-
- export DIRECTION="enA-jpn"
- export DIRECTION_SPEECH="enA"
- export LINE_NO_START=75000
- export LINE_NO_END=100000
- python filter_audio.py
-
- export DIRECTION="enA-jpn"
- export DIRECTION_SPEECH="enA"
- export LINE_NO_START=100000
- export LINE_NO_END=125000
- python filter_audio.py
-
- export DIRECTION="enA-jpn"
- export DIRECTION_SPEECH="enA"
- export LINE_NO_START=125000
- export LINE_NO_END=150000
- python filter_audio.py
-
- export DIRECTION="enA-jpn"
- export DIRECTION_SPEECH="enA"
- export LINE_NO_START=150000
- export LINE_NO_END=175000
- python filter_audio.py
-
- export DIRECTION="enA-jpn"
- export DIRECTION_SPEECH="enA"
- export LINE_NO_START=175000
- export LINE_NO_END=200000
- python filter_audio.py
-
-
- export DIRECTION="enA-jpn"
- export DIRECTION_SPEECH="enA"
- export LINE_NO_START=200000
- export LINE_NO_END=225000
- python filter_audio.py
-
- #
- #export LINE_NO_START=150000
- #export LINE_NO_END=300000
- #export DATASET_ID="0"
- #python push_s2t_translation.py
- #
- #
- #export LINE_NO_START=300000
- #export LINE_NO_END=360000
- #export DATASET_ID="0"
- #python push_s2t_translation.py
-
-
-
- # DOWNLOAD TEXT
- git clone https://github.com/kpu/preprocess
- cd preprocess
- git checkout wet
- git submodule update --init --recursive
- mkdir build
- cd build
- cmake ..
- make -j4
- alias wet_lines="${PWD}/build/bin/wet_lines"
- cd ../
- wget https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz
- cp ../download_text.py ./
- python download_text.py
- cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_1.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_1.tsv
- cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_2.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_2.tsv
- cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_3.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_3.tsv
- cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_4.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_4.tsv
- cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_5.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_5.tsv
- cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_6.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_6.tsv
- cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_7.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_7.tsv
- cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_8.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_8.tsv
- cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_9.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_9.tsv
- cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_10.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_10.tsv
- cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_11.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_11.tsv
- cp ../format_text.py ./
- python format_text.py
- mv text.enA-jpn.json ../
- cd ../
-
-
- ########
- # NLLB #
- ########
- # https://www.kecl.ntt.co.jp/icl/lirg/jparacrawl/
- python -c "from datasets import load_dataset; load_dataset('allenai/nllb', 'eng_Latn-jpn_Jpan')"
-
main_s2s.sh ADDED
@@ -0,0 +1,101 @@
+ ####################
+ # enA-jaA: 718_606 #
+ ####################
+ # test
+ export DATASET_ID=test
+ export DIRECTION="enA-jaA"
+ export LINE_NO_START=0
+ export LINE_NO_END=10
+ python fetch_dataset_s2s.py
+ # main
+ for i in $(seq 1 144);
+ do
+   export N_POOL=15
+   export DATASET_ID=${i}
+   export DIRECTION="enA-jaA"
+   export LINE_NO_START=$(((DATASET_ID-1) * 2500))
+   export LINE_NO_END=$((DATASET_ID * 2500))
+   echo ${LINE_NO_START}
+   python fetch_dataset_s2s.py
+ done
+
+ ######################
+ # enA-zhA: 1_289_192 #
+ ######################
+ # test
+ export DATASET_ID=test
+ export DIRECTION="enA-zhA"
+ export LINE_NO_START=0
+ export LINE_NO_END=10
+ python fetch_dataset_s2s.py
+
+ ####################
+ # enA-viA: 740_598 #
+ ####################
+ # test
+ export DATASET_ID=test
+ export DIRECTION="enA-viA"
+ export LINE_NO_START=0
+ export LINE_NO_END=10
+ python fetch_dataset_s2s.py
+ # main
+ for i in $(seq 1 40);
+ do
+   export N_POOL=15
+   export DATASET_ID=${i}
+   export DIRECTION="enA-viA"
+   export LINE_NO_START=$(((DATASET_ID-1) * 2500))
+   export LINE_NO_END=$((DATASET_ID * 2500))
+   echo ${LINE_NO_START}
+   python fetch_dataset_s2s.py
+ done
+
+ ####################
+ # enA-koA: 511_358 #
+ ####################
+ # test
+ export DATASET_ID=test
+ export DIRECTION="enA-koA"
+ export LINE_NO_START=0
+ export LINE_NO_END=10
+ python fetch_dataset_s2s.py
+
+ ####################
+ # enA-hiA: 454_942 #
+ ####################
+ # test
+ export DATASET_ID=test
+ export DIRECTION="enA-hiA"
+ export LINE_NO_START=0
+ export LINE_NO_END=10
+ python fetch_dataset_s2s.py
+
+ ######################
+ # enA-frA: 3_054_258 #
+ ######################
+ # test
+ export DATASET_ID=test
+ export DIRECTION="enA-frA"
+ export LINE_NO_START=0
+ export LINE_NO_END=10
+ python fetch_dataset_s2s.py
+
+ ######################
+ # enA-esA: 2_658_022 #
+ ######################
+ # test
+ export DATASET_ID=test
+ export DIRECTION="enA-esA"
+ export LINE_NO_START=0
+ export LINE_NO_END=10
+ python fetch_dataset_s2s.py
+
+ ######################
+ # enA-deA: 1_965_186 #
+ ######################
+ # test
+ export DATASET_ID=test
+ export DIRECTION="deA-enA"
+ export LINE_NO_START=0
+ export LINE_NO_END=10
+ python fetch_dataset_s2s.py
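The shard counts in the loops follow from the pair counts in the section headers, assuming the metadata holds one row per side, so the row count is twice the number of aligned line numbers: for enA-jaA, ceil(718_606 / 2 / 2_500) = 144, matching seq 1 144 above, while the enA-viA loop (seq 1 40) covers only the first 100_000 lines. A quick check of that arithmetic:

python -c "from math import ceil; print(ceil(718_606 / 2 / 2_500))"  # -> 144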
main_s2t.sh ADDED
@@ -0,0 +1,60 @@
+ ######################
+ # enA-jpn: 1_468_292 #
+ ######################
+ # test
+ export DATASET_ID=test
+ export DIRECTION="enA-jaA"
+ export LINE_NO_START=0
+ export LINE_NO_END=10
+ python download_audio.py
+
+
+ # DOWNLOAD AUDIO
+ for i in $(seq 91 100);
+ do
+   export N_POOL=15
+   export DATASET_ID=${i}
+   export DIRECTION="enA-jpn"
+   export LINE_NO_START=$(((DATASET_ID-1) * 2500))
+   export LINE_NO_END=$((DATASET_ID * 2500))
+   echo ${LINE_NO_START}
+   python download_audio.py
+ done
+
+ # DOWNLOAD TEXT
+ git clone https://github.com/kpu/preprocess
+ cd preprocess
+ git checkout wet
+ git submodule update --init --recursive
+ mkdir build
+ cd build
+ cmake ..
+ make -j4
+ alias wet_lines="${PWD}/build/bin/wet_lines"
+ cd ../
+ wget https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.withduration.tsv.gz
+ cp ../download_text.py ./
+ python download_text.py
+ cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_1.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_1.tsv
+ cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_2.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_2.tsv
+ cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_3.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_3.tsv
+ cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_4.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_4.tsv
+ cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_5.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_5.tsv
+ cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_6.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_6.tsv
+ cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_7.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_7.tsv
+ cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_8.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_8.tsv
+ cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_9.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_9.tsv
+ cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_10.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_10.tsv
+ cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_11.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_11.tsv
+ cp ../format_text.py ./
+ python format_text.py
+ mv text.enA-jpn.json ../
+ cd ../
+
+
+ ########
+ # NLLB #
+ ########
+ # https://www.kecl.ntt.co.jp/icl/lirg/jparacrawl/
+ python -c "from datasets import load_dataset; load_dataset('allenai/nllb', 'eng_Latn-jpn_Jpan')"
+
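The eleven wet_lines invocations above differ only in the batch index, so they can be collapsed into a loop. An equivalent sketch, assuming the same reordered batch filenames (note that the wet_lines alias may not expand inside a non-interactive script, so the full path to the binary may be needed):

for i in $(seq 1 11);
do
  cat seamless.dataset.metadata.public.enA-jpn.withduration.reordered.batch_${i}.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee seamless.dataset.metadata.public.jpn.batch_${i}.tsv
done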