```python
audio_data = (sample_rate, waveform)  # Replace with your actual audio data
emotion = predict(audio_data)
print(f"Predicted Emotion: {emotion}")
```
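The snippet above assumes `sample_rate` and `waveform` are already in hand. One way to produce them, as a minimal sketch assuming `librosa` is installed (the file path is a placeholder):

```python
import librosa

# Load a clip and resample to 16 kHz mono, matching the model's training rate.
# "speech.wav" is a placeholder path, not a file shipped with this repo.
waveform, sample_rate = librosa.load("speech.wav", sr=16000)
audio_data = (sample_rate, waveform)
```
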
## Training Procedure
- **Preprocessing:** Resampled all audio to 16 kHz.
- **Training:** Fine-tuned `facebook/wav2vec2-base` with emotion labels.
- **Hyperparameters:** Batch size 16, learning rate 5e-5, 5 epochs (see the sketch below).
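A minimal fine-tuning sketch under those hyperparameters follows. The dummy dataset, `num_labels=6`, and the output path are illustrative assumptions, not the exact training setup:

```python
import numpy as np
from datasets import Dataset
from transformers import (
    Trainer,
    TrainingArguments,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForSequenceClassification,
)

# num_labels=6 is an assumption; adjust to the actual emotion label set.
model = Wav2Vec2ForSequenceClassification.from_pretrained(
    "facebook/wav2vec2-base", num_labels=6
)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base")

# Tiny dummy 16 kHz clips so the sketch runs end to end; real training would
# use CREMA-D / RAVDESS audio resampled to 16 kHz.
audio = [np.random.randn(16000).astype("float32") for _ in range(8)]
labels = [0, 1, 2, 3, 4, 5, 0, 1]

def featurize(batch):
    batch["input_values"] = feature_extractor(
        batch["audio"], sampling_rate=16000
    ).input_values
    return batch

train_dataset = Dataset.from_dict({"audio": audio, "label": labels})
train_dataset = train_dataset.map(featurize, batched=True, remove_columns=["audio"])

training_args = TrainingArguments(
    output_dir="wav2vec2-emotion",   # placeholder output path
    per_device_train_batch_size=16,  # batch size from this card
    learning_rate=5e-5,              # learning rate from this card
    num_train_epochs=5,              # epochs from this card
)

Trainer(model=model, args=training_args, train_dataset=train_dataset).train()
```
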
## Evaluation

### Testing Data

Evaluation was performed on a held-out test set from the CREMA-D and RAVDESS datasets.

## Metrics

- **Accuracy:** 85%
- **F1-score:** 82% (weighted average across all classes)
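
As a reference for reproducing these numbers, a sketch using scikit-learn (`y_true` and `y_pred` are placeholder arrays, not the actual test-set labels):

```python
from sklearn.metrics import accuracy_score, f1_score

# Placeholder labels purely for illustration; the reported results come from
# the held-out CREMA-D / RAVDESS test split.
y_true = [0, 1, 2, 2, 1, 0]
y_pred = [0, 1, 2, 1, 1, 0]

accuracy = accuracy_score(y_true, y_pred)
weighted_f1 = f1_score(y_true, y_pred, average="weighted")  # weighted by class support
print(f"Accuracy: {accuracy:.0%}, Weighted F1: {weighted_f1:.0%}")
```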