# Zeebra's picture
# Update app.py
# commit 3f16f8d
# raw | history | blame | 1.26 kB
import gradio as gr
import openai
from gtts import gTTS
import os
import pyttsx3
import io

# Local config module providing the API_KEYS dict.
# (BUG FIX: the previous `from decouple import config` was immediately
# shadowed by this `import config` — the decouple binding was dead code,
# since `config.API_KEYS` below is an attribute of the local module.)
import config

openai.api_key = config.API_KEYS['openai']

# Shared conversation history, seeded with the system prompt.
# decipher() appends user/assistant turns to this list across calls,
# so the chat context persists for the lifetime of the process.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
]
# Main method goes here
def decipher(audio):
    """Transcribe a recorded clip, get a chat reply, speak it, return the log.

    Parameters
    ----------
    audio : str
        Filepath to the recorded audio clip (Gradio ``type="filepath"``).

    Returns
    -------
    str
        The running conversation transcript (user/assistant turns only),
        one ``role: content`` entry per turn.
    """
    global messages
    # Speech-to-text via OpenAI's Whisper model. BUG FIX: use a context
    # manager so the file handle is closed even if the API call raises
    # (the original leaked the open file).
    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)
    messages.append({"role": "user", "content": transcript["text"]})
    # BUG FIX: ChatCompletion requires a chat model; "text-davinci-002"
    # is a completions-only model and the request would be rejected.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages
    )
    # BUG FIX: chat responses expose the reply at
    # choices[0]["message"]["content"]; choices[0]["text"] is the
    # completions-API shape and would raise KeyError here.
    system_message = response["choices"][0]["message"]["content"]
    # Speak the reply aloud with the local (offline) TTS engine.
    engine = pyttsx3.init()
    engine.say(system_message)
    engine.runAndWait()
    messages.append({"role": "assistant", "content": system_message})
    # Build the on-screen transcript, hiding the system prompt.
    chat_transcript = ""
    for message in messages:
        if message['role'] != 'system':
            chat_transcript += message['role'] + ": " + message['content'] + "\n\n"
    return chat_transcript
# Gradio UI: record from the microphone, hand the temp-file path to
# decipher(), and display the returned transcript as text.
# NOTE(review): gr.Audio(source=...) is the Gradio 3.x signature; Gradio 4+
# renamed it to sources=[...] — confirm against the pinned gradio version.
interface = gr.Interface(fn=decipher, inputs=gr.Audio(
    source="microphone", type="filepath"), outputs="text")
interface.launch()