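"""Gradio demo: speech emotion recognition plus keyword-based SOS detection.

Loads two local wav2vec2 checkpoints (./SER_model for emotion classification,
./SPT_model for CTC speech-to-text), transcribes the input clip, and raises an
SOS flag when the detected emotion is negative or the key phrase is heard.
"""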
import torch
import torch.nn.functional as F
import torchaudio
from transformers import (
    AutoConfig,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2Processor,
    Wav2Vec2ConformerForCTC,
)
import librosa
import jellyfish
import gradio as gr


def speech_file_to_array_fn(path, sampling_rate):
    """Load an audio file, downmix it to mono, and resample it to `sampling_rate`."""
    speech_array, source_rate = torchaudio.load(path)
    # Average the channels instead of picking one arbitrarily; this also
    # avoids the IndexError the old try/except worked around for mono files.
    if speech_array.shape[0] > 1:
        speech_array = speech_array.mean(dim=0, keepdim=True)
    # Resample from the file's native rate to the model's expected rate.
    resampler = torchaudio.transforms.Resample(source_rate, sampling_rate)
    return resampler(speech_array).squeeze().numpy()


def predict(path, sampling_rate, feature_extractor, device, model, config):
    """Classify the emotion in an audio file and return per-label scores."""
    speech = speech_file_to_array_fn(path, sampling_rate)
    inputs = feature_extractor(speech, sampling_rate=sampling_rate, return_tensors="pt", padding=True)
    inputs = {key: inputs[key].to(device) for key in inputs}
    with torch.no_grad():
        logits = model(**inputs).logits
    scores = F.softmax(logits, dim=1).cpu().numpy()[0]
    # The old round(...)/:.1f combination formatted the score twice; one format spec is enough.
    outputs = [{"Emotion": config.id2label[i], "Score": f"{score * 100:.1f}%"} for i, score in enumerate(scores)]
    return outputs
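
# Shape of `predict`'s return value, which `main` parses below (labels come
# from `config.id2label`; the numbers here are illustrative only):
# [{"Emotion": "neutral", "Score": "12.3%"}, {"Emotion": "fear", "Score": "61.0%"}, ...]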

def get_speech_to_text(model, processor, audio_path):
    """Transcribe an audio file with the CTC speech-to-text model."""
    data, sample_rate = librosa.load(audio_path, sr=16000)
    input_values = processor(data, sampling_rate=sample_rate, return_tensors="pt", padding="longest").input_values
    with torch.no_grad():
        logits = model(input_values).logits
    predicted_ids = torch.argmax(logits, dim=-1)
    # batch_decode returns a list with one transcription per batch item.
    return processor.batch_decode(predicted_ids)

# def get_percentage_match(transcription, text):
#     return jellyfish.damerau_levenshtein_distance(transcription, text)

def get_sos_status(transcription, key_phrase):
    """Return 1 if every word of `key_phrase` appears in the transcription, else 0."""
    words = key_phrase.split()
    hits = sum(1 for word in words if word in transcription[0])
    # Compare against the phrase length instead of the old hard-coded 3,
    # so the check still works if the key phrase changes.
    return 1 if hits == len(words) else 0
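
# A hypothetical fuzzy variant (a sketch, not used above): the commented-out
# jellyfish helper suggests edit-distance matching was considered, so this
# accepts the key phrase when some phrase-sized window of the transcript is
# within `max_distance` edits of it. `get_sos_status_fuzzy` and `max_distance`
# are names introduced here for illustration.
def get_sos_status_fuzzy(transcription, key_phrase, max_distance=3):
    text = transcription[0]
    n = len(key_phrase)
    # Slide a window of the phrase's length across the transcript and keep
    # the smallest Damerau-Levenshtein distance seen.
    best = min(
        (jellyfish.damerau_levenshtein_distance(text[i:i + n], key_phrase)
         for i in range(max(1, len(text) - n + 1))),
        default=n,
    )
    return 1 if best <= max_distance else 0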

# Load the models once at import time instead of on every request; reloading
# two wav2vec2 checkpoints per call made each prediction needlessly slow.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
SPT_MODEL = "./SPT_model"
SER_MODEL = "./SER_model"
config = AutoConfig.from_pretrained(SER_MODEL)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SER_MODEL)
sampling_rate = feature_extractor.sampling_rate
model = Wav2Vec2ForSequenceClassification.from_pretrained(SER_MODEL).to(device)
processor = Wav2Vec2Processor.from_pretrained(SPT_MODEL)
model_SPT = Wav2Vec2ConformerForCTC.from_pretrained(SPT_MODEL)


def main(file, micro=None):
    """Run emotion recognition and SOS detection on an uploaded or recorded clip."""
    # Prefer the uploaded file; fall back to the microphone recording.
    # With live=True both inputs start out empty, so return placeholders
    # instead of crashing on a None path.
    if file is not None:
        audio = file
    elif micro is not None:
        audio = micro
    else:
        return ["", "", 0]

    outputs = predict(audio, sampling_rate, feature_extractor, device=device, model=model, config=config)
    transcription = get_speech_to_text(model_SPT, processor, audio_path=audio)
    key_phrase = "APPLE BRIDGE UNDER"
    status = get_sos_status(transcription, key_phrase)

    # Keep the label with the highest softmax score.
    max_score = 0.0
    emotion = ""
    for output in outputs:
        score = float(output["Score"].rstrip("%"))
        if score > max_score:
            max_score = score
            emotion = output["Emotion"]

    # Collapse the fine-grained labels into negative / neutral / positive.
    if emotion in ("disgust", "fear", "sadness"):
        emotion = "negative"
    elif emotion != "neutral":
        emotion = "positive"

    # Raise an SOS when the emotion is negative or the key phrase was spoken.
    sos = 1 if emotion == "negative" or status == 1 else 0
    return [emotion, transcription[0], sos]

gr.Interface(
    fn=main,
    inputs=[
        gr.Audio(source="upload", type="filepath", label="File"),
        gr.Audio(source="microphone", type="filepath", streaming=False, label="Microphone"),
    ],
    # Three labelled outputs, matching the three values `main` returns;
    # the single "textbox" did not line up with the return signature.
    outputs=[
        gr.Textbox(label="Emotion"),
        gr.Textbox(label="Transcription"),
        gr.Textbox(label="SOS"),
    ],
    live=True,
).launch()
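
# Hypothetical smoke test (a sketch): bypass the UI and call `main` directly.
# The path below is illustrative, not a file shipped with the repo:
#   emotion, transcription, sos = main("./testing_audios/sample.wav")
#   print(emotion, transcription, sos)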