asahi417 committed
Commit 4f70466
1 Parent(s): a0302d0
Files changed (4):
  1. format_text.py +13 -3
  2. main_s2s.sh +36 -20
  3. main_s2t.sh +2 -2
  4. text.enA-jpn.json +0 -3
format_text.py CHANGED
@@ -1,10 +1,20 @@
+import os
 import csv
 import json
 from glob import glob
 import pandas as pd
 
-df = pd.concat([pd.read_csv(i, quoting=csv.QUOTE_NONE, encoding='utf-8', sep='\t', header=None, on_bad_lines='skip') for i in glob('seamless.dataset.metadata.public.jpn.batch_*.tsv')])
-line_no = [i.split(" ")[3] for i in df[0]]
+
+direction_speech = os.getenv("DIRECTION_SPEECH", "enA")
+direction_text = os.getenv("DIRECTION_TEXT", "jpn")
+direction = os.getenv("DIRECTION", "enA-jpn")
+
+df = pd.concat([
+    pd.read_csv(i, quoting=csv.QUOTE_NONE, encoding='utf-8', sep='\t', header=None, on_bad_lines='skip')
+    for i in glob(f'metadata.{direction_speech}-{direction_text}.batch_*')
+])
+line_no = [int(i.split(" ")[-1]) for i in df[0]]
 text = df[1].values.tolist()
-with open("text.enA-jpn.json", "w") as f:
+os.makedirs("text_corpus", exist_ok=True)
+with open(f"text_corpus/text.{direction_speech}-{direction_text}.json", "w") as f:
     json.dump({l: t for l, t in zip(line_no, text)}, f)
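
The rewritten script is driven entirely by environment variables. A minimal invocation sketch, assuming the metadata.*.batch_* files already sit in the working directory (the values below are just the script's own defaults):

export DIRECTION_SPEECH="enA"
export DIRECTION_TEXT="jpn"
export DIRECTION="enA-jpn"
python format_text.py  # writes text_corpus/text.enA-jpn.json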
main_s2s.sh CHANGED
@@ -19,13 +19,7 @@ do
 python fetch_dataset_s2s.py
 done
 # tokenize
-for i in $(seq 1 30);
-do
-export DATASET_ID=${i}
-export DIRECTION="enA-jaA"
-python tokenize_dataset_s2s.py
-done
-for i in $(seq 30 60);
+for i in $(seq 120 144);
 do
 export DATASET_ID=${i}
 export DIRECTION="enA-jaA"
@@ -42,7 +36,7 @@ export LINE_NO_START=0
 export LINE_NO_END=10
 python fetch_dataset_s2s.py
 # main
-for i in $(seq 1 40);
+for i in $(seq 121 149);
 do
 export N_POOL=15
 export DATASET_ID=${i}
@@ -52,7 +46,7 @@ do
 echo ${LINE_NO_START}
 python fetch_dataset_s2s.py
 done
-for i in $(seq 41 80);
+for i in 114 77 78 79 80;
 do
 export N_POOL=15
 export DATASET_ID=${i}
@@ -62,6 +56,14 @@ do
 echo ${LINE_NO_START}
 python fetch_dataset_s2s.py
 done
+# tokenize
+for i in $(seq 50 120);
+do
+export DATASET_ID=${i}
+export DIRECTION="enA-viA"
+python tokenize_dataset_s2s.py
+done
+
 
 ####################
 # enA-koA: 511_358 #
@@ -104,7 +106,7 @@ export LINE_NO_START=0
 export LINE_NO_END=10
 python fetch_dataset_s2s.py
 # main
-for i in $(seq 1 50);
+for i in $(seq 1 91);
 do
 export N_POOL=15
 export DATASET_ID=${i}
@@ -114,15 +116,19 @@ do
 echo ${LINE_NO_START}
 python fetch_dataset_s2s.py
 done
-for i in $(seq 51 91);
+# tokenize
+for i in $(seq 21 50);
 do
-export N_POOL=15
 export DATASET_ID=${i}
 export DIRECTION="enA-hiA"
-export LINE_NO_START=$(((DATASET_ID-1) * 2500))
-export LINE_NO_END=$((DATASET_ID * 2500))
-echo ${LINE_NO_START}
-python fetch_dataset_s2s.py
+python tokenize_dataset_s2s.py
+done
+
+for i in $(seq 71 91);
+do
+export DATASET_ID=${i}
+export DIRECTION="enA-hiA"
+python tokenize_dataset_s2s.py
 done
 
 ######################
@@ -135,11 +141,11 @@ export LINE_NO_START=0
 export LINE_NO_END=10
 python fetch_dataset_s2s.py
 # main
-for i in $(seq 1 100);
+for i in $(seq 1 258);
 do
 export N_POOL=15
 export DATASET_ID=${i}
-export DIRECTION="enA-viA"
+export DIRECTION="enA-zhA"
 export LINE_NO_START=$(((DATASET_ID-1) * 2500))
 export LINE_NO_END=$((DATASET_ID * 2500))
 echo ${LINE_NO_START}
@@ -156,11 +162,21 @@ export LINE_NO_START=0
 export LINE_NO_END=10
 python fetch_dataset_s2s.py
 # main
-for i in $(seq 1 100);
+for i in $(seq 1 300);
 do
 export N_POOL=15
 export DATASET_ID=${i}
-export DIRECTION="enA-viA"
+export DIRECTION="enA-frA"
+export LINE_NO_START=$(((DATASET_ID-1) * 2500))
+export LINE_NO_END=$((DATASET_ID * 2500))
+echo ${LINE_NO_START}
+python fetch_dataset_s2s.py
+done
+for i in $(seq 300 600);
+do
+export N_POOL=15
+export DATASET_ID=${i}
+export DIRECTION="enA-frA"
 export LINE_NO_START=$(((DATASET_ID-1) * 2500))
 export LINE_NO_END=$((DATASET_ID * 2500))
 echo ${LINE_NO_START}
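
Every fetch loop above follows the same slicing pattern: DATASET_ID selects a 2,500-line window via LINE_NO_START/LINE_NO_END. A standalone sketch for re-running a single chunk by hand, assuming the enA-frA direction from the diff (the chunk number 42 is an arbitrary example):

export N_POOL=15
export DATASET_ID=42
export DIRECTION="enA-frA"
export LINE_NO_START=$(((DATASET_ID-1) * 2500))  # 102500
export LINE_NO_END=$((DATASET_ID * 2500))        # 105000
echo ${LINE_NO_START}
python fetch_dataset_s2s.py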
main_s2t.sh CHANGED
@@ -51,9 +51,9 @@ export CHUNK_SIZE=10
 python download_s2t_metadata.py
 for i in $(seq 1 ${CHUNK_SIZE});
 do
-cat seamless.dataset.metadata.public.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.withduration.reordered.batch_${i}.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee metadata.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.batch_1.tsv &
+cat seamless.dataset.metadata.public.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.withduration.reordered.batch_${i}.tsv | egrep ^crawl-data | tr '\t' ' ' | wet_lines | tee metadata.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.batch_${i}.tsv &
 done
-#python format_text.py
+python format_text.py
 
 ########
 # NLLB #
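
Each backgrounded wet_lines pipeline now tees into its own metadata.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.batch_${i}.tsv rather than all writing to batch_1.tsv, which is exactly what the re-enabled format_text.py globs for. A quick sanity check before formatting, assuming one wants to block on the background jobs first (the wait is an assumption, not part of the committed script):

wait  # assumed: block until the backgrounded wet_lines pipelines finish
ls metadata.${DIRECTION_SPEECH}-${DIRECTION_TEXT}.batch_*.tsv | wc -l  # expect ${CHUNK_SIZE}
python format_text.py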
text.enA-jpn.json DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:96b5def10895c3cf05bf6d8a3b9626b7832b6e225f834700a87c39d6a2ef99fc
-size 1094842