Training in progress, epoch 1

Files changed:
- .ipynb_checkpoints/run-checkpoint.sh +4 -5
- pytorch_model.bin +1 -1
- run.sh +4 -5
- special_tokens_map.json +1 -1
- training_args.bin +1 -1
.ipynb_checkpoints/run-checkpoint.sh CHANGED

@@ -5,17 +5,14 @@ python run_speech_recognition_ctc.py \
     --output_dir="./" \
     --cache_dir="../container_0" \
     --overwrite_output_dir \
-    --num_train_epochs="
+    --num_train_epochs="10" \
     --per_device_train_batch_size="32" \
     --per_device_eval_batch_size="16" \
     --gradient_accumulation_steps="2" \
     --learning_rate="3e-4" \
     --warmup_steps="500" \
-    --evaluation_strategy="steps" \
     --text_column_name="sentence" \
     --length_column_name="input_length" \
-    --save_steps="10" \
-    --eval_steps="10" \
     --layerdrop="0.0" \
     --save_total_limit="3" \
     --freeze_feature_encoder \
@@ -26,4 +23,6 @@ python run_speech_recognition_ctc.py \
     --push_to_hub \
     --do_train \
     --do_eval \
-    --max_duration_in_seconds="6"
+    --max_duration_in_seconds="6" \
+    --evaluation_strategy='epoch' \
+    --save_strategy='epoch' \
pytorch_model.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:784a1780fdbcda12fd603cc104204f0a2d6d7d781b9e1d386505a6b7382dff3c
 size 1278024433
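The binary files in this commit are stored as Git LFS pointers: the repository tracks only the blob's `oid sha256:<hash>` and `size`, not the bytes themselves. As a minimal sketch (the local path is an assumption, not part of this commit), a downloaded checkpoint can be checked against the pointer above like this:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream in 1 MiB chunks so the ~1.3 GB checkpoint never sits in memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# oid from the new pointer above; "pytorch_model.bin" as the local file is assumed.
expected = "784a1780fdbcda12fd603cc104204f0a2d6d7d781b9e1d386505a6b7382dff3c"
assert sha256_of("pytorch_model.bin") == expected, "blob does not match LFS pointer"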
run.sh CHANGED

@@ -5,17 +5,14 @@ python run_speech_recognition_ctc.py \
     --output_dir="./" \
     --cache_dir="../container_0" \
     --overwrite_output_dir \
-    --num_train_epochs="
+    --num_train_epochs="10" \
     --per_device_train_batch_size="32" \
     --per_device_eval_batch_size="16" \
     --gradient_accumulation_steps="2" \
     --learning_rate="3e-4" \
     --warmup_steps="500" \
-    --evaluation_strategy="steps" \
     --text_column_name="sentence" \
     --length_column_name="input_length" \
-    --save_steps="10" \
-    --eval_steps="10" \
     --layerdrop="0.0" \
     --save_total_limit="3" \
     --freeze_feature_encoder \
@@ -26,4 +23,6 @@ python run_speech_recognition_ctc.py \
     --push_to_hub \
     --do_train \
     --do_eval \
-    --max_duration_in_seconds="6"
+    --max_duration_in_seconds="6" \
+    --evaluation_strategy='epoch' \
+    --save_strategy='epoch' \
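The substantive change in run.sh is the switch from step-based to epoch-based checkpointing: --save_steps="10" and --eval_steps="10" are dropped, --evaluation_strategy and --save_strategy are set to 'epoch', and --num_train_epochs is pinned at 10. As a hedged sketch (not the script's actual parsing code), the training-loop portion of these flags maps onto transformers.TrainingArguments roughly as below; the remaining flags (--layerdrop, --freeze_feature_encoder, --text_column_name, --max_duration_in_seconds, --cache_dir) are model/data arguments that run_speech_recognition_ctc.py handles separately.

from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="./",
    overwrite_output_dir=True,
    num_train_epochs=10,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=16,
    gradient_accumulation_steps=2,
    learning_rate=3e-4,
    warmup_steps=500,
    length_column_name="input_length",
    save_total_limit=3,
    evaluation_strategy="epoch",  # was "steps" with eval_steps=10
    save_strategy="epoch",        # was step-based with save_steps=10
    push_to_hub=True,
    do_train=True,
    do_eval=True,
)

With epoch-based strategies the eval and save cadence follows the dataset size instead of a fixed step count, so save_total_limit=3 keeps the three most recent epoch checkpoints.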
special_tokens_map.json CHANGED

@@ -1 +1 @@
-{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
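The only change here is that additional_special_tokens grew from six entries to eight: the list already held three duplicated <s>/</s> pairs and this save appended a fourth. A hypothetical cleanup, not part of this commit and assuming the file sits in the working directory, would dedupe the list by its "content" field:

import json

with open("special_tokens_map.json") as f:
    m = json.load(f)

# Keep the first occurrence of each token string, drop repeated <s>/</s> pairs.
seen, deduped = set(), []
for tok in m["additional_special_tokens"]:
    if tok["content"] not in seen:
        seen.add(tok["content"])
        deduped.append(tok)
m["additional_special_tokens"] = deduped

with open("special_tokens_map.json", "w") as f:
    json.dump(m, f)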
training_args.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:cd0cc2e29fc5d67aca4c97fcfeb6f7453dfa88fcc92a11295c0d35d13889bd59
 size 2991
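training_args.bin is the pickled TrainingArguments object the Trainer saves alongside each checkpoint, also tracked as an LFS pointer here. As a small sketch (assuming the file is local and transformers is importable, since unpickling reconstructs a TrainingArguments instance), loading it back is a quick way to confirm the epoch-based strategy from this commit took effect:

import torch

# weights_only=False is required on newer PyTorch because this file holds
# an arbitrary pickled Python object, not just tensors.
args = torch.load("training_args.bin", weights_only=False)
print(args.evaluation_strategy, args.save_strategy, args.num_train_epochs)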