####################
# Install Wet Tool #
####################
# libraries for runpod
sudo apt-get install -y libcurl4-openssl-dev libbz2-dev liblzma-dev
sudo add-apt-repository ppa:boost-latest/ppa -y
sudo apt-get update
sudo apt-get purge 'boost*' -y   # quote the glob so the shell doesn't expand it locally
sudo apt-get install -y libboost-all-dev
# clone and build the library
git clone https://github.com/kpu/preprocess
cd preprocess
git checkout wet
git submodule update --init --recursive
mkdir -p build
cd build
cmake ..
make -j4
alias wet_lines="${PWD}/bin/wet_lines"
cd ../../
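# optional sanity check (not part of the original walkthrough): confirm the
# binary built and the alias resolves; note that aliases only expand in
# interactive shells, so use the full path inside non-interactive scripts
ls -l preprocess/build/bin/wet_lines
type wet_lines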


###########
# enA-vie #
###########
# text
export DIRECTION_SPEECH="enA"
export DIRECTION_TEXT="vie"
export CHUNK_SIZE=20
python download_s2t_metadata.py
for i in $(seq 1 ${CHUNK_SIZE});
do
  grep -E '^crawl-data' seamless.dataset.metadata.public.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.withduration.reordered.batch_${i}.tsv | tr '\t' ' ' | wet_lines | tee metadata.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.batch_${i}.tsv &
done
wait  # the fetch pipelines above run in the background; let them all finish before formatting
python format_text.py
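# optional spot check (run after the wait above): each raw batch file fetched
# by wet_lines should be non-empty before trusting the formatted output
wc -l metadata.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.batch_*.tsv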

###########
# enA-est #
###########
# text
export DIRECTION_SPEECH="enA"
export DIRECTION_TEXT="est"
export CHUNK_SIZE=20
python download_s2t_metadata.py
for i in $(seq 1 ${CHUNK_SIZE});
do
  grep -E '^crawl-data' seamless.dataset.metadata.public.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.withduration.reordered.batch_${i}.tsv | tr '\t' ' ' | wet_lines | tee metadata.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.batch_${i}.tsv &
done
wait  # finish the background fetches first
python format_text.py
# audio
for i in $(seq 213 300);
do
  export N_POOL=15
  export DATASET_ID=${i}
  export DIRECTION_SPEECH="enA"
  export DIRECTION_TEXT="est"
  export LINE_NO_START=$(((DATASET_ID-1) * 2500))
  export LINE_NO_END=$((DATASET_ID * 2500))
  echo ${LINE_NO_START}
  python fetch_dataset_s2t.py
done
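# each DATASET_ID addresses a half-open window of 2,500 metadata lines,
# [(DATASET_ID-1)*2500, DATASET_ID*2500); e.g. the first iteration above:
( DATASET_ID=213; echo "$(( (DATASET_ID - 1) * 2500 ))..$(( DATASET_ID * 2500 ))" )  # -> 530000..532500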


###########
# enA-jpn #
###########
# text
export DIRECTION_SPEECH="enA"
export DIRECTION_TEXT="jpn"
export CHUNK_SIZE=20
python download_s2t_metadata.py
for i in $(seq 1 ${CHUNK_SIZE});
do
  grep -E '^crawl-data' seamless.dataset.metadata.public.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.withduration.reordered.batch_${i}.tsv | tr '\t' ' ' | wet_lines | tee metadata.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.batch_${i}.tsv &
done
wait  # finish the background fetches first
python format_text.py
# audio
for i in $(seq 233 294);
do
  export N_POOL=15
  export DATASET_ID=${i}
  export DIRECTION_SPEECH="enA"
  export DIRECTION_TEXT="jpn"
  export LINE_NO_START=$(((DATASET_ID-1) * 2500))
  export LINE_NO_END=$((DATASET_ID * 2500))
  echo ${LINE_NO_START}
  python fetch_dataset_s2t.py
done
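# the largest useful DATASET_ID is ceil(total_metadata_lines / 2500); with a
# hypothetical line count of 734,500 the ceiling division gives
echo $(( (734500 + 2499) / 2500 ))  # -> 294, the upper bound used in the loop above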

########
# NLLB #
########
# JParaCrawl (another eng-jpn parallel corpus): https://www.kecl.ntt.co.jp/icl/lirg/jparacrawl/
python -c "from datasets import load_dataset; load_dataset('allenai/nllb', 'eng_Latn-jpn_Jpan')"