import gradio as gr
from transformers import pipeline
import numpy as np
import re
from collections import Counter

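# Whisper ASR pipeline; return_timestamps=True lets the model transcribe audio
# longer than 30 seconds, which the accumulated stream will eventually exceed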
transcriber = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-base.en",
    return_timestamps=True,
)


def transcribe_live(state, words_list, new_chunk):
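    """Accumulate streamed audio, re-transcribe it with Whisper, and count filler words.

    Returns the updated state, the per-word counts, the plain transcription text,
    and a HighlightedText-style dict marking each filler-word occurrence.
    """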
    try:
        words_to_check_for = [
            word.strip().lower() for word in words_list.split(",") if word.strip()
        ]
    except (AttributeError, TypeError):
        gr.Warning("Please enter a valid list of words to check for")
        words_to_check_for = []

    stream = state.get("stream", None)
    previous_transcription = state.get("full_transcription", "")
    previous_counts_of_words = state.get(
        "counts_of_words", {word: 0 for word in words_to_check_for}
    )
    previous_highlighted_transcription = state.get(
        "highlighted_transcription", {"text": previous_transcription, "entities": []}
    )

    if new_chunk is None:
        gr.Info("You can start transcribing by clicking on the Record button")
        # Return one value per output component, keeping the previous results
        return (state, previous_counts_of_words,
                previous_transcription, previous_highlighted_transcription)

    sr, y = new_chunk

    # Convert to mono if stereo
    if y.ndim > 1:
        y = y.mean(axis=1)

    y = y.astype(np.float32)
    max_abs = np.max(np.abs(y))
    if max_abs > 0:  # avoid dividing by zero on silent chunks
        y /= max_abs

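    # Append the new chunk to the audio accumulated so far so that Whisper
    # always re-transcribes the full recording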
    if stream is not None:
        stream = np.concatenate([stream, y])
    else:
        stream = y

    try:
        new_transcription = transcriber({"sampling_rate": sr, "raw": stream})
        print(f"new transcription: {new_transcription}")
    except Exception as e:
        # gr.Error must be raised to be displayed, so warn and keep the previous outputs
        gr.Warning(f"Transcription failed. Error: {e}")
        print(f"Transcription failed. Error: {e}")
        return (state, previous_counts_of_words,
                previous_transcription, previous_highlighted_transcription)

    full_transcription_text = new_transcription["text"]

    full_transcription_text_lower = full_transcription_text.lower()

    # Find every tracked word in the transcription along with its start and end indices
    if words_to_check_for:
        pattern = r"\b(" + "|".join(re.escape(word) for word in words_to_check_for) + r")\b"
        matches: list[re.Match] = list(re.finditer(pattern, full_transcription_text_lower))
    else:
        matches = []

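    # Tally how many times each tracked word appears in the full transcription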
    counter = Counter(
        match.group(0) for match in matches if match.group(0) in words_to_check_for
    )

    new_counts_of_words = {word: counter.get(word, 0) for word in words_to_check_for}

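    # Build the dict format expected by gr.HighlightedText: the raw text plus
    # character-level entity spans for each filler-word match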
    new_highlighted_transcription = {
        "text": full_transcription_text,
        "entities": [
            {
                "entity": "FILLER",
                "start": match.start(),
                "end": match.end(),
            }
            for match in matches
        ],
    }

    new_state = {
        "stream": stream,
        "full_transcription": full_transcription_text,
        "counts_of_words": new_counts_of_words,
        "highlighted_transcription": new_highlighted_transcription,
    }

    return (
        new_state,
        new_counts_of_words,
        full_transcription_text,
        new_highlighted_transcription,
    )


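# Gradio UI: a streaming microphone input feeding live word counts and a
# highlighted transcription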
with gr.Blocks() as demo:
    state = gr.State(
        value={
            "stream": None,
            "full_transcription": "",
            "counts_of_words": {},
        }
    )

    gr.Markdown(
        """
        # GrammASRian

        This app transcribes your speech in real-time and counts the number of filler words you use.

        The intended use case is to help you become more aware of the filler words you use, so you can reduce them and improve your speech.

        It uses the OpenAI Whisper model for transcription in a streaming configuration.
        """
    )

    filler_words = gr.Textbox(
        label="List of filler words",
        value="like, so, you know",
        info="Enter a comma-separated list of words to check for",
    )
    recording = gr.Audio(streaming=True, label="Recording")

    word_counts = gr.JSON(label="Filler words count", value={})
    # word_counts = gr.BarPlot(label="Filler words count", value={})
    transcription = gr.Textbox(label="Transcription", value="", visible=False)

    highlighted_transcription = gr.HighlightedText(
        label="Transcription",
        value={
            "text": "",
            "entities": [],
        },
        color_map={"FILLER": "red"},
    )

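    # Re-transcribe the accumulated audio every 5 seconds of streaming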
    recording.stream(
        transcribe_live,
        inputs=[state, filler_words, recording],
        outputs=[state, word_counts, transcription, highlighted_transcription],
        stream_every=5,
        time_limit=-1,
    )

demo.launch()