TheAIchemist13 committed on
Commit 6c499bc
1 Parent(s): 83b8b6e

Training in progress, step 25

config.json CHANGED
@@ -1,6 +1,7 @@
 {
-  "_name_or_path": "facebook/wav2vec2-base-960h",
+  "_name_or_path": "Harveenchadha/hindi_base_wav2vec2",
   "activation_dropout": 0.1,
+  "adapter_attn_dim": null,
   "adapter_kernel_size": 3,
   "adapter_stride": 2,
   "add_adapter": false,
@@ -8,7 +9,7 @@
   "architectures": [
     "Wav2Vec2ForCTC"
   ],
-  "attention_dropout": 0.1,
+  "attention_dropout": 0.0,
   "bos_token_id": 1,
   "classifier_proj_size": 256,
   "codevector_dim": 256,
@@ -41,7 +42,7 @@
     2,
     2
   ],
-  "ctc_loss_reduction": "sum",
+  "ctc_loss_reduction": "mean",
   "ctc_zero_infinity": false,
   "diversity_loss_weight": 0.1,
   "do_stable_layer_norm": false,
@@ -49,18 +50,18 @@
   "feat_extract_activation": "gelu",
   "feat_extract_dropout": 0.0,
   "feat_extract_norm": "group",
-  "feat_proj_dropout": 0.1,
+  "feat_proj_dropout": 0.0,
   "feat_quantizer_dropout": 0.0,
   "final_dropout": 0.1,
   "gradient_checkpointing": false,
   "hidden_act": "gelu",
-  "hidden_dropout": 0.1,
+  "hidden_dropout": 0.0,
   "hidden_dropout_prob": 0.1,
   "hidden_size": 768,
   "initializer_range": 0.02,
   "intermediate_size": 3072,
   "layer_norm_eps": 1e-05,
-  "layerdrop": 0.1,
+  "layerdrop": 0.0,
   "mask_feature_length": 10,
   "mask_feature_min_masks": 0,
   "mask_feature_prob": 0.0,
@@ -102,7 +103,7 @@
     1
   ],
   "torch_dtype": "float32",
-  "transformers_version": "4.15.0",
+  "transformers_version": "4.34.0",
   "use_weighted_layer_sum": false,
   "vocab_size": 84,
   "xvector_output_dim": 512
preprocessor_config.json CHANGED
@@ -3,8 +3,7 @@
   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
   "feature_size": 1,
   "padding_side": "right",
-  "padding_value": 0,
-  "return_attention_mask": false,
-  "sampling_rate": 16000,
-  "processor_class": "Wav2Vec2ProcessorWithLM"
+  "padding_value": 0.0,
+  "return_attention_mask": true,
+  "sampling_rate": 16000
 }
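A sketch of a feature extractor equivalent to the updated preprocessor_config.json, for reference only (the values mirror the diff; `padding_side="right"` is the library default and therefore omitted):

```python
from transformers import Wav2Vec2FeatureExtractor

# Feature extractor matching the new preprocessor_config.json above.
feature_extractor = Wav2Vec2FeatureExtractor(
    feature_size=1,
    sampling_rate=16000,
    padding_value=0.0,
    return_attention_mask=True,
)
```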
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9e2f8806249edafa4fa30feb9ec5a095f60f1d13ec98728482c366eacfa7cd31
+oid sha256:d60f082bbe46dcff6809a24176cd6ac8fd3dd38d36d5e5fc06fdafa4cc826202
 size 377818721
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7c9c810b7d7f6bdcac295cb610ce72d561e963bb9616857077f45c41997611c8
+oid sha256:3f04a755321257cf17152eaf3ecb18ce44c7c2a57946fced25172e87a068099c
 size 4091
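Both binaries are Git LFS pointers; only the sha256 oid changes while the sizes stay identical. A small sketch for verifying a downloaded artifact against the oid in the pointer (file paths are assumptions):

```python
import hashlib

# Compute the sha256 of a local file and compare it to the LFS pointer's oid.
def sha256_of(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "3f04a755321257cf17152eaf3ecb18ce44c7c2a57946fced25172e87a068099c"
assert sha256_of("training_args.bin") == expected
```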