File size: 2,252 Bytes
e2e3b5f
 
 
 
 
 
 
ad7f048
e2e3b5f
 
ad7f048
 
e2e3b5f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
# -*- coding: utf-8 -*-

import crepe
import spacy
import librosa
import gradio as gr
import pandas as pd
from transformers import pipeline, RobertaTokenizerFast, TFRobertaForSequenceClassification

# Speech-to-text pipeline (wav2vec2, expects 16 kHz mono audio).
asr = pipeline('automatic-speech-recognition', model='facebook/wav2vec2-large-960h-lv60-self')
# Tokenizer/model loaded explicitly so the weights are cached before the
# sentiment pipeline below resolves the same checkpoint.
tokenizer = RobertaTokenizerFast.from_pretrained("arpanghoshal/EmoRoBERTa")
model = TFRobertaForSequenceClassification.from_pretrained("arpanghoshal/EmoRoBERTa")
# Emotion classifier applied to the transcribed text.
emo = pipeline('sentiment-analysis', model='arpanghoshal/EmoRoBERTa')
# spaCy English model, used only for POS tagging (interjection = filler word).
lang_model = spacy.load("en_core_web_sm")

def transcribe_and_describe(audio):
  """Transcribe an audio file and compute prosodic / affective descriptors.

  Parameters
  ----------
  audio : str
      Path to an audio file (Gradio passes a filepath).

  Returns
  -------
  tuple
      (text, words_per_minute, filler_word_pr, mean_pitch, pitch_std,
       mean_volume, volume_std, mean_spectral_flatness,
       spectral_flatness_std, emotion) — ordered to match the Gradio
      output labels (Transcription, WPM, Filler %, ...).
  """

  # wav2vec2 was trained on 16 kHz audio, so resample on load.
  audio, sr = librosa.load(audio, sr=16000)

  text = asr(audio)['text']

  # Interjections (POS tag INTJ) approximate filler words ("uh", "um", ...).
  doc = lang_model(text)
  filler_words = [token.text for token in doc if token.pos_ == 'INTJ']
  # Guard against an empty transcription to avoid ZeroDivisionError.
  filler_word_pr = len(filler_words) / len(doc) if len(doc) else 0.0

  # librosa >= 0.10 makes these arguments keyword-only; pass them by name.
  flatness = pd.DataFrame(librosa.feature.spectral_flatness(y=audio).T).describe().T
  loudness = pd.DataFrame(librosa.feature.rms(y=audio).T).describe().T
  time, frequency, confidence, activation = crepe.predict(audio, sr)
  frequency = pd.DataFrame(frequency.T).describe().T

  mean_spectral_flatness = flatness.loc[0, 'mean']
  spectral_flatness_std = flatness.loc[0, 'std']
  mean_pitch = frequency.loc[0, 'mean']
  pitch_std = frequency.loc[0, 'std']
  mean_volume = loudness.loc[0, 'mean']
  volume_std = loudness.loc[0, 'std']

  # str.split() (no argument) ignores runs of whitespace, so double spaces
  # do not inflate the word count; guard against zero-length audio.
  duration_min = librosa.get_duration(y=audio, sr=sr) / 60
  words_per_minute = len(text.split()) / duration_min if duration_min else 0.0

  emotion = emo(text)[0]['label']

  # NOTE: words_per_minute comes before filler_word_pr so the tuple lines up
  # with the Gradio output labels ("Rate of Speech" then "Filler Word Percent").
  return (text, words_per_minute, filler_word_pr, mean_pitch, pitch_std, mean_volume, volume_std, mean_spectral_flatness, spectral_flatness_std, emotion)

# Wire the analysis function into a Gradio UI: microphone input recorded to a
# temp file (type="filepath"), ten text outputs, then start the local server.
# NOTE(review): the output labels below are positional — confirm their order
# matches the tuple returned by transcribe_and_describe exactly.
gr.Interface(
    fn=transcribe_and_describe, 
    inputs=gr.Audio(source="microphone", type="filepath"), 
    outputs=[
        gr.Text(label="Transcription"), 
        gr.Text(label="Rate of Speech (WPM)"), 
        gr.Text(label="Filler Word Percent"),
        gr.Text(label="Mean Pitch (Hz)"), 
        gr.Text(label="Pitch Variation (Hz)"), 
        gr.Text(label="Mean Volume (W)"),
        gr.Text(label="Volume Variation (W)"),
        gr.Text(label="Mean Spectral Flatness (dB)"),
        gr.Text(label="Spectral Flatness Variation (dB)"),
        gr.Text(label="Emotion")
        ]
        ).launch()