polixonrio committed on
Commit e066e7c
1 Parent(s): b1a0b6c

Training in progress, step 1000

pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bb7e7cdb12c993247dae38eaff7decc384670c9aff9b01b96ce60cb6b4c49c22
+ oid sha256:148dc9d8e83df3c44cac9de9435f8e4448aea496e86b272f78ecad49ce3c1541
  size 967102601
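This commit only swaps the Git LFS pointer for pytorch_model.bin; the new oid is the SHA-256 of the updated weights. As a minimal sketch (illustrative helper, assuming the resolved pytorch_model.bin has been downloaded into the working directory), a downloaded file can be checked against the oid recorded in its pointer:

import hashlib

def lfs_sha256(path, chunk_size=1 << 20):
    # Stream the file in chunks and return the hex digest that a Git LFS
    # pointer records as "oid sha256:<hex>".
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected value copied from the new pointer above.
expected = "148dc9d8e83df3c44cac9de9435f8e4448aea496e86b272f78ecad49ce3c1541"
assert lfs_sha256("pytorch_model.bin") == expected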
run.log ADDED
@@ -0,0 +1,277 @@
+ 12/12/2022 06:37:01 - WARNING - __main__ - Process rank: -1, device: cuda:0, n_gpu: 1distributed training: False, 16-bits training: True
+ 12/12/2022 06:37:01 - INFO - __main__ - Training/evaluation parameters Seq2SeqTrainingArguments(
+ _n_gpu=1,
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ auto_find_batch_size=False,
+ bf16=False,
+ bf16_full_eval=False,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_pin_memory=True,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800,
+ debug=[],
+ deepspeed=None,
+ disable_tqdm=False,
+ do_eval=True,
+ do_predict=False,
+ do_train=True,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_steps=1000,
+ evaluation_strategy=steps,
+ fp16=True,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ generation_max_length=225,
+ generation_num_beams=None,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=True,
+ greater_is_better=False,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_model_id=None,
+ hub_private_repo=False,
+ hub_strategy=every_save,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_inputs_for_metrics=False,
+ jit_mode_eval=False,
+ label_names=None,
+ label_smoothing_factor=0.0,
+ learning_rate=1e-05,
+ length_column_name=input_length,
+ load_best_model_at_end=True,
+ local_rank=-1,
+ log_level=passive,
+ log_level_replica=passive,
+ log_on_each_node=True,
+ logging_dir=./runs/Dec12_06-37-01_129-213-128-6,
+ logging_first_step=False,
+ logging_nan_inf_filter=True,
+ logging_steps=25,
+ logging_strategy=steps,
+ lr_scheduler_type=linear,
+ max_grad_norm=1.0,
+ max_steps=5000,
+ metric_for_best_model=wer,
+ mp_parameters=,
+ no_cuda=False,
+ num_train_epochs=3.0,
+ optim=adamw_hf,
+ optim_args=None,
+ output_dir=./,
+ overwrite_output_dir=True,
+ past_index=-1,
+ per_device_eval_batch_size=32,
+ per_device_train_batch_size=64,
+ predict_with_generate=True,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ remove_unused_columns=True,
+ report_to=['tensorboard'],
+ resume_from_checkpoint=None,
+ run_name=./,
+ save_on_each_node=False,
+ save_steps=1000,
+ save_strategy=steps,
+ save_total_limit=None,
+ seed=42,
+ sharded_ddp=[],
+ skip_memory_metrics=True,
+ sortish_sampler=False,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_mps_device=False,
+ warmup_ratio=0.0,
+ warmup_steps=500,
+ weight_decay=0.0,
+ xpu_backend=None,
+ )
+ 12/12/2022 06:37:01 - INFO - __main__ - Training/evaluation parameters Seq2SeqTrainingArguments(
+ _n_gpu=1,
+ adafactor=False,
+ adam_beta1=0.9,
+ adam_beta2=0.999,
+ adam_epsilon=1e-08,
+ auto_find_batch_size=False,
+ bf16=False,
+ bf16_full_eval=False,
+ data_seed=None,
+ dataloader_drop_last=False,
+ dataloader_num_workers=0,
+ dataloader_pin_memory=True,
+ ddp_bucket_cap_mb=None,
+ ddp_find_unused_parameters=None,
+ ddp_timeout=1800,
+ debug=[],
+ deepspeed=None,
+ disable_tqdm=False,
+ do_eval=True,
+ do_predict=False,
+ do_train=True,
+ eval_accumulation_steps=None,
+ eval_delay=0,
+ eval_steps=1000,
+ evaluation_strategy=steps,
+ fp16=True,
+ fp16_backend=auto,
+ fp16_full_eval=False,
+ fp16_opt_level=O1,
+ fsdp=[],
+ fsdp_min_num_params=0,
+ fsdp_transformer_layer_cls_to_wrap=None,
+ full_determinism=False,
+ generation_max_length=225,
+ generation_num_beams=None,
+ gradient_accumulation_steps=1,
+ gradient_checkpointing=True,
+ greater_is_better=False,
+ group_by_length=False,
+ half_precision_backend=auto,
+ hub_model_id=None,
+ hub_private_repo=False,
+ hub_strategy=every_save,
+ hub_token=<HUB_TOKEN>,
+ ignore_data_skip=False,
+ include_inputs_for_metrics=False,
+ jit_mode_eval=False,
+ label_names=None,
+ label_smoothing_factor=0.0,
+ learning_rate=1e-05,
+ length_column_name=input_length,
+ load_best_model_at_end=True,
+ local_rank=-1,
+ log_level=passive,
+ log_level_replica=passive,
+ log_on_each_node=True,
+ logging_dir=./runs/Dec12_06-37-01_129-213-128-6,
+ logging_first_step=False,
+ logging_nan_inf_filter=True,
+ logging_steps=25,
+ logging_strategy=steps,
+ lr_scheduler_type=linear,
+ max_grad_norm=1.0,
+ max_steps=5000,
+ metric_for_best_model=wer,
+ mp_parameters=,
+ no_cuda=False,
+ num_train_epochs=3.0,
+ optim=adamw_hf,
+ optim_args=None,
+ output_dir=./,
+ overwrite_output_dir=True,
+ past_index=-1,
+ per_device_eval_batch_size=32,
+ per_device_train_batch_size=64,
+ predict_with_generate=True,
+ prediction_loss_only=False,
+ push_to_hub=True,
+ push_to_hub_model_id=None,
+ push_to_hub_organization=None,
+ push_to_hub_token=<PUSH_TO_HUB_TOKEN>,
+ ray_scope=last,
+ remove_unused_columns=True,
+ report_to=['tensorboard'],
+ resume_from_checkpoint=None,
+ run_name=./,
+ save_on_each_node=False,
+ save_steps=1000,
+ save_strategy=steps,
+ save_total_limit=None,
+ seed=42,
+ sharded_ddp=[],
+ skip_memory_metrics=True,
+ sortish_sampler=False,
+ tf32=None,
+ torch_compile=False,
+ torch_compile_backend=None,
+ torch_compile_mode=None,
+ torchdynamo=None,
+ tpu_metrics_debug=False,
+ tpu_num_cores=None,
+ use_ipex=False,
+ use_legacy_prediction_loop=False,
+ use_mps_device=False,
+ warmup_ratio=0.0,
+ warmup_steps=500,
+ weight_decay=0.0,
+ xpu_backend=None,
+ )
+ 12/12/2022 06:37:01 - INFO - datasets.utils.file_utils - https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/resolve/main/common_voice_11_0.py not found in cache or force_download set to True, downloading to /home/ubuntu/.cache/huggingface/datasets/downloads/tmpdbxqdsn0
+ 12/12/2022 06:37:01 - INFO - datasets.utils.file_utils - storing https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/resolve/main/common_voice_11_0.py in cache at /home/ubuntu/.cache/huggingface/datasets/downloads/5abdbe284df2909d62a839c9de9034226d2d813d25f7409b4088e82b7b16a366.7401280e97fd4bb6082c8bf5b5b90c47c5864c5dba05b2175fe0ed482e38da4d.py
+ 12/12/2022 06:37:01 - INFO - datasets.utils.file_utils - creating metadata file for /home/ubuntu/.cache/huggingface/datasets/downloads/5abdbe284df2909d62a839c9de9034226d2d813d25f7409b4088e82b7b16a366.7401280e97fd4bb6082c8bf5b5b90c47c5864c5dba05b2175fe0ed482e38da4d.py
+ 12/12/2022 06:37:01 - INFO - datasets.utils.file_utils - https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/resolve/main/README.md not found in cache or force_download set to True, downloading to /home/ubuntu/.cache/huggingface/datasets/downloads/tmpooh_q071
+ 12/12/2022 06:37:01 - INFO - datasets.utils.file_utils - storing https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/resolve/main/README.md in cache at /home/ubuntu/.cache/huggingface/datasets/downloads/b73ba60fdf4bad8aeccea9e079a8a975a0848ca07c20abfb3101b55bed36fa78.13aef32ddbbf56a80c1e209ff39dc86e67cb27ba8666d8363e8e1a027dc02351
+ 12/12/2022 06:37:01 - INFO - datasets.utils.file_utils - creating metadata file for /home/ubuntu/.cache/huggingface/datasets/downloads/b73ba60fdf4bad8aeccea9e079a8a975a0848ca07c20abfb3101b55bed36fa78.13aef32ddbbf56a80c1e209ff39dc86e67cb27ba8666d8363e8e1a027dc02351
+ 12/12/2022 06:37:01 - INFO - datasets.utils.file_utils - https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/resolve/main/languages.py not found in cache or force_download set to True, downloading to /home/ubuntu/.cache/huggingface/datasets/downloads/tmpiktwf5x7
+ 12/12/2022 06:37:01 - INFO - datasets.utils.file_utils - storing https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/resolve/main/languages.py in cache at /home/ubuntu/.cache/huggingface/datasets/downloads/44d465f76e4534ee3586b64e8d250b7ba50698d3e2cc5115b94171ad143fa776.6007f71ad33ec083cf7ce388a8da9c654779b9b1677dc3c83348ecfe8d430b8c.py
+ 12/12/2022 06:37:01 - INFO - datasets.utils.file_utils - creating metadata file for /home/ubuntu/.cache/huggingface/datasets/downloads/44d465f76e4534ee3586b64e8d250b7ba50698d3e2cc5115b94171ad143fa776.6007f71ad33ec083cf7ce388a8da9c654779b9b1677dc3c83348ecfe8d430b8c.py
+ 12/12/2022 06:37:01 - INFO - datasets.utils.file_utils - https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/resolve/main/release_stats.py not found in cache or force_download set to True, downloading to /home/ubuntu/.cache/huggingface/datasets/downloads/tmp68qnwpgl
+ 12/12/2022 06:37:01 - INFO - datasets.utils.file_utils - storing https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/resolve/main/release_stats.py in cache at /home/ubuntu/.cache/huggingface/datasets/downloads/42bf8b971b498e85494eaf891c3a974225ff7f47e30d70fea12a3a6e365e56d9.42cb02a0975fec649048e8496584d95e0cc43f92495c70462dfbe2affb6e2425.py
+ 12/12/2022 06:37:01 - INFO - datasets.utils.file_utils - creating metadata file for /home/ubuntu/.cache/huggingface/datasets/downloads/42bf8b971b498e85494eaf891c3a974225ff7f47e30d70fea12a3a6e365e56d9.42cb02a0975fec649048e8496584d95e0cc43f92495c70462dfbe2affb6e2425.py
+ 12/12/2022 06:37:01 - INFO - datasets.info - Loading Dataset Infos from /home/ubuntu/.cache/huggingface/modules/datasets_modules/datasets/mozilla-foundation--common_voice_11_0/f8e47235d9b4e68fa24ed71d63266a02018ccf7194b2a8c9c598a5f3ab304d9f
+ 12/12/2022 06:37:02 - INFO - datasets.info - Loading Dataset Infos from /home/ubuntu/.cache/huggingface/modules/datasets_modules/datasets/mozilla-foundation--common_voice_11_0/f8e47235d9b4e68fa24ed71d63266a02018ccf7194b2a8c9c598a5f3ab304d9f
+ 12/12/2022 06:37:02 - INFO - datasets.info - Loading Dataset Infos from /home/ubuntu/.cache/huggingface/modules/datasets_modules/datasets/mozilla-foundation--common_voice_11_0/f8e47235d9b4e68fa24ed71d63266a02018ccf7194b2a8c9c598a5f3ab304d9f
+ 12/12/2022 06:37:19 - WARNING - huggingface_hub.repository - /home/ubuntu/whisper-small-fy-NL-Transfer-From-EN/./ is already a clone of https://huggingface.co/polixonrio/whisper-small-fy-NL-Transfer-From-EN. Make sure you pull the latest changes with `repo.git_pull()`.
+ {'loss': 7.6922, 'learning_rate': 4.0000000000000003e-07, 'epoch': 0.01}
+ {'loss': 5.8503, 'learning_rate': 9.000000000000001e-07, 'epoch': 0.01}
+ {'loss': 3.9042, 'learning_rate': 1.4000000000000001e-06, 'epoch': 0.01}
+ {'loss': 2.7093, 'learning_rate': 1.9000000000000002e-06, 'epoch': 1.0}
+ {'loss': 1.7436, 'learning_rate': 2.4000000000000003e-06, 'epoch': 1.01}
+ {'loss': 1.3753, 'learning_rate': 2.9e-06, 'epoch': 1.01}
+ {'loss': 1.082, 'learning_rate': 3.4000000000000005e-06, 'epoch': 1.02}
+ {'loss': 0.923, 'learning_rate': 3.900000000000001e-06, 'epoch': 2.0}
+ {'loss': 0.7541, 'learning_rate': 4.4e-06, 'epoch': 2.01}
+ {'loss': 0.681, 'learning_rate': 4.9000000000000005e-06, 'epoch': 2.01}
+ {'loss': 0.5853, 'learning_rate': 5.400000000000001e-06, 'epoch': 2.02}
+ {'loss': 0.5019, 'learning_rate': 5.9e-06, 'epoch': 3.0}
+ {'loss': 0.439, 'learning_rate': 6.4000000000000006e-06, 'epoch': 3.01}
+ {'loss': 0.3904, 'learning_rate': 6.9e-06, 'epoch': 3.01}
+ {'loss': 0.3482, 'learning_rate': 7.4e-06, 'epoch': 3.02}
+ {'loss': 0.2813, 'learning_rate': 7.9e-06, 'epoch': 4.0}
+ {'loss': 0.2462, 'learning_rate': 8.400000000000001e-06, 'epoch': 4.01}
+ {'loss': 0.2148, 'learning_rate': 8.900000000000001e-06, 'epoch': 4.01}
+ {'loss': 0.1904, 'learning_rate': 9.4e-06, 'epoch': 4.02}
+ {'loss': 0.1422, 'learning_rate': 9.9e-06, 'epoch': 5.0}
+ {'loss': 0.1294, 'learning_rate': 9.955555555555556e-06, 'epoch': 5.01}
+ {'loss': 0.0987, 'learning_rate': 9.9e-06, 'epoch': 5.01}
+ {'loss': 0.0965, 'learning_rate': 9.844444444444446e-06, 'epoch': 6.0}
+ {'loss': 0.0641, 'learning_rate': 9.78888888888889e-06, 'epoch': 6.01}
+ {'loss': 0.0626, 'learning_rate': 9.733333333333334e-06, 'epoch': 6.01}
+ {'loss': 0.0461, 'learning_rate': 9.677777777777778e-06, 'epoch': 6.02}
+ {'loss': 0.0434, 'learning_rate': 9.622222222222222e-06, 'epoch': 7.0}
+ {'loss': 0.0332, 'learning_rate': 9.566666666666668e-06, 'epoch': 7.01}
+ {'loss': 0.0307, 'learning_rate': 9.511111111111112e-06, 'epoch': 7.01}
+ {'loss': 0.0243, 'learning_rate': 9.455555555555557e-06, 'epoch': 7.02}
+ {'loss': 0.0197, 'learning_rate': 9.4e-06, 'epoch': 8.0}
+ {'loss': 0.0178, 'learning_rate': 9.344444444444446e-06, 'epoch': 8.01}
+ {'loss': 0.018, 'learning_rate': 9.28888888888889e-06, 'epoch': 8.01}
+ {'loss': 0.0158, 'learning_rate': 9.233333333333334e-06, 'epoch': 8.02}
+ {'loss': 0.0128, 'learning_rate': 9.17777777777778e-06, 'epoch': 9.0}
+ {'loss': 0.0123, 'learning_rate': 9.122222222222223e-06, 'epoch': 9.01}
+ {'loss': 0.011, 'learning_rate': 9.066666666666667e-06, 'epoch': 9.01}
+ {'loss': 0.01, 'learning_rate': 9.011111111111111e-06, 'epoch': 9.02}
+ {'loss': 0.0086, 'learning_rate': 8.955555555555555e-06, 'epoch': 10.01}
+ {'loss': 0.0078, 'learning_rate': 8.900000000000001e-06, 'epoch': 10.01}
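The parameter dump in run.log corresponds to a Seq2SeqTrainingArguments object. As an illustration only (not taken from the training script itself), a minimal Python sketch that sets the non-default values visible in the log and leaves everything else at the library defaults:

from transformers import Seq2SeqTrainingArguments

# Values mirror the run.log dump above; unset fields keep their defaults.
training_args = Seq2SeqTrainingArguments(
    output_dir="./",
    per_device_train_batch_size=64,
    per_device_eval_batch_size=32,
    learning_rate=1e-5,
    warmup_steps=500,
    max_steps=5000,
    gradient_checkpointing=True,
    fp16=True,
    evaluation_strategy="steps",
    eval_steps=1000,
    save_steps=1000,
    logging_steps=25,
    predict_with_generate=True,
    generation_max_length=225,
    load_best_model_at_end=True,
    metric_for_best_model="wer",
    greater_is_better=False,
    report_to=["tensorboard"],
    push_to_hub=True,
)

The learning-rate column in the loss lines follows the linear schedule these arguments imply: a warmup from 0 to 1e-05 over the first 500 steps, then a linear decay toward 0 at step 5000.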
run.sh CHANGED
@@ -2,7 +2,7 @@ python run_speech_recognition_seq2seq_streaming.py \
2
  --model_name_or_path="openai/whisper-small" \
3
  --dataset_name="mozilla-foundation/common_voice_11_0" \
4
  --dataset_config_name="fy-NL" \
5
- --train_split_name="train" \
6
  --eval_split_name="test" \
7
  --model_index_name="Whisper Small Western Frisian (Netherlands)" \
8
  --max_steps="5000" \
 
2
  --model_name_or_path="openai/whisper-small" \
3
  --dataset_name="mozilla-foundation/common_voice_11_0" \
4
  --dataset_config_name="fy-NL" \
5
+ --train_split_name="train+validation" \
6
  --eval_split_name="test" \
7
  --model_index_name="Whisper Small Western Frisian (Netherlands)" \
8
  --max_steps="5000" \
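The only functional change to run.sh is folding the Common Voice validation split into the training data. As a hedged illustration of what the "train+validation" split spec means in the datasets library (shown non-streaming for brevity; the streaming script invoked above may assemble the splits differently):

from datasets import load_dataset

# "train+validation" concatenates both splits into one training set.
# Common Voice 11 is gated, so this assumes the dataset terms have been
# accepted and a Hugging Face token is available.
train_ds = load_dataset(
    "mozilla-foundation/common_voice_11_0", "fy-NL", split="train+validation"
)
eval_ds = load_dataset(
    "mozilla-foundation/common_voice_11_0", "fy-NL", split="test"
)
print(len(train_ds), len(eval_ds))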
runs/Dec12_06-37-01_129-213-128-6/1670827042.115391/events.out.tfevents.1670827042.129-213-128-6.68824.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4697accdf86823e13f9e577eeb8c9d365338fba99d43a855d1bc9351ade9c5a5
+ size 5869
runs/Dec12_06-37-01_129-213-128-6/events.out.tfevents.1670827042.129-213-128-6.68824.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1e5f7139ad1b4deb6a380989ba67916db0d79c73efb35a7251b1ea9269321d9
+ size 10531
runs/Dec12_11-48-40_129-213-128-6/1670845728.5230641/events.out.tfevents.1670845728.129-213-128-6.2037374.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57920ecca05d876dfa9b6947ebc5001508dc11133c91a890b782a4399d3e2367
+ size 5869
runs/Dec12_11-48-40_129-213-128-6/events.out.tfevents.1670845728.129-213-128-6.2037374.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b4b91195c771cf631a6b5ee2f4629d260d84c4acad916ebdceafdc0fc0f6056
+ size 10849
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1fed2793182c960f94e65c259eac9262c1bc3500a78583e1ecfcd70232dc5a66
+ oid sha256:a5cbd14f154559bd0603874dc5b11605a7ea8764769ccab0dad9e3ca6131e4b0
  size 3579
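training_args.bin is the serialized Seq2SeqTrainingArguments object that the Trainer writes alongside checkpoints, so its LFS pointer changes whenever the arguments change. A small sketch (assuming a transformers version compatible with the pickled class is installed) for inspecting it locally:

import torch

# The Trainer saves its arguments with torch.save; loading them back yields
# the Seq2SeqTrainingArguments instance dumped in run.log above. On newer
# PyTorch releases you may need torch.load(..., weights_only=False).
args = torch.load("training_args.bin")
print(args.learning_rate, args.warmup_steps, args.max_steps, args.metric_for_best_model)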