# hey-gemma / app.py
import os
import gradio as gr
import numpy as np
from groq import Groq
from transformers import pipeline
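# Whisper runs locally for speech-to-text; the Groq client expects a
# GROQ_API_KEY environment variable (e.g. a Space secret) for the Gemma calls.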
transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")
groq_client = Groq(api_key=os.getenv('GROQ_API_KEY'))

def transcribe(stream, new_chunk):
    """
    Transcribe the accumulated audio stream with Whisper.
    """
    sr, y = new_chunk
    y = y.astype(np.float32)
    peak = np.max(np.abs(y))
    if peak > 0:  # avoid dividing by zero on silent chunks
        y /= peak
    # Append the new chunk to the running stream kept in Gradio state.
    if stream is not None:
        stream = np.concatenate([stream, y])
    else:
        stream = y
    return stream, transcriber({"sampling_rate": sr, "raw": stream})["text"]

def autocomplete(text):
    """
    Generate Gemma's reply to the transcribed text via the Groq API.
    """
    if text != "":
        response = groq_client.chat.completions.create(
            model="gemma-7b-it",
            messages=[
                {"role": "system", "content": "You are a friendly assistant named Gemma."},
                {"role": "user", "content": text},
            ],
        )
        return response.choices[0].message.content
    return ""

def process_audio(input_audio, new_chunk):
    """
    Transcribe the incoming audio chunk and generate a reply, accumulating
    the audio stream in Gradio state between calls.
    """
    stream, transcription = transcribe(input_audio, new_chunk)
    text = autocomplete(transcription)
    print(transcription, text)
    return stream, text

demo = gr.Interface(
    fn=process_audio,
    inputs=["state", gr.Audio(sources=["microphone"], streaming=True)],
    outputs=["state", gr.Markdown()],
    title="Dear Gemma",
    description="Talk to the AI assistant.",
    live=True,
    allow_flagging="never",
)

demo.launch()
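
# To run locally (assuming the gradio, transformers, numpy, and groq packages
# are installed and a valid Groq API key is available):
#   GROQ_API_KEY=<your-key> python app.py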