Cristhian2430 committed
Commit 6723ad4 · verified · 1 Parent(s): a5663ee

Training in progress, step 1000

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "openai/whisper-large-v3",
+  "_name_or_path": "openai/whisper-large-v3-turbo",
   "activation_dropout": 0.0,
   "activation_function": "gelu",
   "apply_spec_augment": false,
@@ -9,7 +9,7 @@
   "attention_dropout": 0.0,
   "begin_suppress_tokens": [
     220,
-    50257
+    50256
   ],
   "bos_token_id": 50257,
   "classifier_proj_size": 256,
@@ -17,7 +17,7 @@
   "decoder_attention_heads": 20,
   "decoder_ffn_dim": 5120,
   "decoder_layerdrop": 0.0,
-  "decoder_layers": 32,
+  "decoder_layers": 4,
   "decoder_start_token_id": 50258,
   "dropout": 0.0,
   "encoder_attention_heads": 20,
@@ -34,18 +34,16 @@
   "mask_time_length": 10,
   "mask_time_min_masks": 2,
   "mask_time_prob": 0.05,
-  "max_length": 448,
   "max_source_positions": 1500,
   "max_target_positions": 448,
   "median_filter_width": 7,
   "model_type": "whisper",
   "num_hidden_layers": 32,
   "num_mel_bins": 128,
-  "pad_token_id": 50256,
+  "pad_token_id": 50257,
   "scale_embedding": false,
-  "suppress_tokens": [],
   "torch_dtype": "float32",
-  "transformers_version": "4.41.0.dev0",
+  "transformers_version": "4.47.0.dev0",
   "use_cache": true,
   "use_weighted_layer_sum": false,
   "vocab_size": 51866
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2985f36c45745745c7f7c7d5079ae773b534671755a0697ff212fcf45610f3eb
+size 3235581408
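
What the diff stores is a Git LFS pointer, not the ~3.2 GB of weights; 3235581408 bytes is consistent with roughly 0.8B float32 parameters, matching the turbo architecture. A minimal sketch that downloads the file and verifies it against the pointer's sha256 oid (the repo id is a placeholder, not taken from this page):

    import hashlib
    from huggingface_hub import hf_hub_download

    # NOTE: placeholder repo id; substitute the repo this commit belongs to.
    path = hf_hub_download("Cristhian2430/whisper-finetune", "model.safetensors")

    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)

    expected = "2985f36c45745745c7f7c7d5079ae773b534671755a0697ff212fcf45610f3eb"
    print(h.hexdigest() == expected)  # True if the download matches the LFS pointer
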
runs/Dec05_14-17-07_a35b2528692b/events.out.tfevents.1733408247.a35b2528692b.486.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc857b2c53d8a415314cc3465e217cebcc386e3849941d64082a0a93e4f4b99f
+size 14627
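
The added tfevents file holds the TensorBoard scalars the Trainer logged over these first 1000 steps. A minimal sketch, assuming tensorboard is installed, to read them back; the exact tag names are an assumption, so list Tags() first:

    from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

    ea = EventAccumulator("runs/Dec05_14-17-07_a35b2528692b")
    ea.Reload()
    print(ea.Tags()["scalars"])              # discover which scalar tags were logged
    for event in ea.Scalars("train/loss"):   # tag name is an assumption
        print(event.step, event.value)
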
tokenizer_config.json CHANGED
@@ -12987,6 +12987,7 @@
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|endoftext|>",
   "errors": "replace",
+  "extra_special_tokens": {},
   "model_max_length": 1000000000000000019884624838656,
   "pad_token": "<|endoftext|>",
   "processor_class": "WhisperProcessor",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7b782e35f7b12d3a5b7b3a6c3707dc8c3043cd7920f9f081c416cf177a85a4a2
-size 5240
+oid sha256:01e26c2f51a6e826cb4ad97120ecc89360ba3e22e0b7cfde7899f17621125662
+size 5496
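
training_args.bin is a pickled TrainingArguments object rather than tensors; the new oid and slightly larger size likely reflect changed hyperparameters plus the new argument fields added between transformers 4.41 and 4.47. A minimal sketch to inspect it locally, assuming the file has been downloaded:

    import torch

    # Not a plain tensor file, so newer torch needs weights_only=False to
    # unpickle it. Only load files from sources you trust.
    args = torch.load("training_args.bin", weights_only=False)
    print(type(args).__name__)                 # e.g. Seq2SeqTrainingArguments
    print(args.learning_rate, args.max_steps)  # hyperparameters recorded for this run
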