Ar4ikov committed
Commit 89bf06f · 1 Parent(s): 1b27893

Update app.py

Files changed (1): app.py (+39, -5)
app.py CHANGED
@@ -1,15 +1,49 @@
 from transformers import pipeline
 import gradio as gr
 from pyctcdecode import BeamSearchDecoderCTC
-from Aniemore import EmotionFromVoice
 import os
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torchaudio
+from transformers import AutoConfig, AutoModel, Wav2Vec2FeatureExtractor
+import librosa
+import numpy as np
 
-# emo = pipeline("audio-classification", model="Aniemore/wav2vec2-xlsr-53-russian-emotion-recognition", trust_remote_code=True)
-emo = EmotionFromVoice()
+
+def speech_file_to_array_fn(path, sampling_rate):
+    speech_array, _sampling_rate = torchaudio.load(path)
+    resampler = torchaudio.transforms.Resample(_sampling_rate)
+    speech = resampler(speech_array).squeeze().numpy()
+    return speech
+
+
+def predict(path, sampling_rate):
+    speech = speech_file_to_array_fn(path, sampling_rate)
+    inputs = feature_extractor(speech, sampling_rate=sampling_rate, return_tensors="pt", padding=True)
+    inputs = {key: inputs[key].to(device) for key in inputs}
+
+    with torch.no_grad():
+        logits = model(**inputs).logits
+
+    scores = F.softmax(logits, dim=1).detach().cpu().numpy()[0]
+    outputs = [{"Emotion": config.id2label[i], "Score": f"{round(score * 100, 3):.1f}%"} for i, score in enumerate(scores)]
+    return outputs
+
+
+TRUST = True
+
+config = AutoConfig.from_pretrained("Aniemore/wav2vec2-xlsr-53-russian-emotion-recognition", trust_remote_code=TRUST)
+model = AutoModel.from_pretrained("Aniemore/wav2vec2-xlsr-53-russian-emotion-recognition", trust_remote_code=TRUST)
+feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("Aniemore/wav2vec2-xlsr-53-russian-emotion-recognition")
+
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model.to(device)
 
 
 def transcribe(audio):
-    print(audio)
+    return predict(audio, 16000)
+
 
 def get_asr_interface():
     return gr.Interface(
@@ -26,7 +60,7 @@ interfaces = [
 ]
 
 names = [
-    "ASR"
+    "Russian Emotion Recognition"
 ]
 
 gr.TabbedInterface(interfaces, names).launch(server_name="0.0.0.0", enable_queue=False)
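One reading note on the resampling helper above: torchaudio.transforms.Resample(_sampling_rate) sets only orig_freq, so speech_file_to_array_fn resamples to Resample's default new_freq of 16000 Hz, and its sampling_rate parameter is never actually used. A minimal explicit equivalent, as a sketch (the name load_as_16k_mono and the mono downmix are illustrative additions, not part of the commit):

import torchaudio

def load_as_16k_mono(path, target_sr=16000):
    # torchaudio.load returns (waveform, original_rate); waveform is (channels, samples)
    waveform, orig_sr = torchaudio.load(path)
    # downmix multi-channel audio explicitly instead of relying on squeeze()
    waveform = waveform.mean(dim=0, keepdim=True)
    # name both rates rather than leaning on Resample's new_freq=16000 default
    resampler = torchaudio.transforms.Resample(orig_freq=orig_sr, new_freq=target_sr)
    return resampler(waveform).squeeze(0).numpy()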
 
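The comment deleted at the top of the file pointed at a one-liner alternative: transformers' audio-classification pipeline. A sketch of that route, under the assumption that the checkpoint's remote code registers a classification head the pipeline can use (the commit left this line commented out in favor of the manual forward pass, so treat it as untested; "test.wav" is a hypothetical local file):

from transformers import pipeline

# trust_remote_code=True mirrors the flag used in the commit; the checkpoint
# ships custom model code that the stock Auto classes would not load otherwise.
classifier = pipeline(
    "audio-classification",
    model="Aniemore/wav2vec2-xlsr-53-russian-emotion-recognition",
    trust_remote_code=True,
)

# A list of {"label": ..., "score": ...} dicts, comparable to what predict()
# assembles by hand from softmaxed logits and config.id2label.
print(classifier("test.wav"))

predict() keeps the same steps explicit: feature extraction, a forward pass under torch.no_grad(), softmax over the logits, and config.id2label to name each class.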