not-lain committed on
Commit cd3a2e4
1 parent: d2c3d60

fix HF integration


This fixes the configuration problem. You can test this PR before merging with the following code:
```python
from transformers import AutoModelForSequenceClassification

# The revision parameter is only needed while this PR is open; remove it once the PR is merged.
# This PR also fixes the code snippet on the model card.
model2 = AutoModelForSequenceClassification.from_pretrained(
    "KhaldiAbderrhmane/emotion-classifier-hubert",
    trust_remote_code=True,
    revision="refs/pr/1",
)
```
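
To confirm the fix took effect, here is a quick sanity check (a minimal sketch, assuming the snippet above ran successfully; the expected values come from the updated config.json in this PR):

```python
# The custom class should now be resolved through auto_map in config.json
print(type(model2).__name__)      # expected: EmotionClassifierHuBERT
print(model2.config.model_type)   # expected: hubert
print(model2.config.num_classes)  # expected: 6
```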

config.json CHANGED

```diff
@@ -1,6 +1,14 @@
 {
+  "architectures": [
+    "EmotionClassifierHuBERT"
+  ],
+  "auto_map": {
+    "AutoConfig": "configuration_emotion_classifier.EmotionClassifierConfig",
+    "AutoModelForSequenceClassification": "modeling_emotion_classifier.EmotionClassifierHuBERT"
+  },
   "hidden_size_lstm": 128,
+  "model_type": "hubert",
   "num_classes": 6,
-  "transformers_version": "4.37.2"
+  "torch_dtype": "float32",
+  "transformers_version": "4.41.1"
 }
-
```
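
The new `auto_map` block is what routes the `Auto*` loaders to the custom classes shipped in the repo, and `model_type` is required for `PretrainedConfig` serialization. As a minimal sketch, the configuration alone can now be loaded on its own (again assuming the PR is still open, hence the `revision` argument):

```python
from transformers import AutoConfig

# auto_map points AutoConfig at configuration_emotion_classifier.EmotionClassifierConfig
config = AutoConfig.from_pretrained(
    "KhaldiAbderrhmane/emotion-classifier-hubert",
    trust_remote_code=True,
    revision="refs/pr/1",
)
print(config.model_type)        # "hubert"
print(config.hidden_size_lstm)  # 128
```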
 
configuration_emotion_classifier.py CHANGED

```diff
@@ -1,8 +1,7 @@
 from transformers import PretrainedConfig
 
 class EmotionClassifierConfig(PretrainedConfig):
-
-
+    model_type = "hubert"
     def __init__(
         self,
         hidden_size_lstm =128,
```
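
Declaring `model_type` as a class attribute is what lets `PretrainedConfig` write it into the serialized config.json and lets the `Auto*` machinery identify the model family. For reference, a minimal sketch of what the full class plausibly looks like, assuming only the fields visible in this diff and in config.json (`hidden_size_lstm`, `num_classes`); the actual file may define more parameters:

```python
from transformers import PretrainedConfig

class EmotionClassifierConfig(PretrainedConfig):
    model_type = "hubert"

    def __init__(
        self,
        hidden_size_lstm=128,  # default visible in the diff above
        num_classes=6,         # assumption: mirrors "num_classes": 6 in config.json
        **kwargs,
    ):
        self.hidden_size_lstm = hidden_size_lstm
        self.num_classes = num_classes
        super().__init__(**kwargs)
```
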
model.safetensors CHANGED

```diff
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:959d79d116e1a22ccd3fd16b4e7f4c99b0e359cc67bce15458907db901e86a3c
-size 1278105600
+oid sha256:0c31d92b1b9d4de0f4bef72baff3625000148b627f99e3588ba540b35b203150
+size 1278105656
```