|
import gradio as gr |
|
import openai |
|
from decouple import config |
|
from gtts import gTTS |
|
import os |
|
import pyttsx3 |
|
import io |
|
import config |
|
|
|
# Authenticate against the OpenAI API with the key kept in the local
# `config` module (expects config.API_KEYS to be a dict with an 'openai' entry).
# NOTE(review): `import config` shadows `from decouple import config` above —
# confirm which configuration source is actually intended.
openai.api_key = config.API_KEYS['openai']


# Running conversation history sent to the chat API on every request.
# The system prompt stays first; user/assistant turns are appended in decipher().
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
]
|
|
|
|
|
def decipher(audio):
    """Transcribe a recorded clip, get a chat reply, speak it, and return the chat log.

    Parameters
    ----------
    audio : str
        Filesystem path to the recorded audio file (Gradio ``type="filepath"``).

    Returns
    -------
    str
        The conversation so far, one ``role: content`` paragraph per turn,
        with the system prompt omitted.
    """
    global messages

    # Transcribe with Whisper; the context manager guarantees the file
    # handle is closed even if the API call raises (the original leaked it).
    with open(audio, "rb") as audio_file:
        transcript = openai.Audio.transcribe("whisper-1", audio_file)

    messages.append({"role": "user", "content": transcript["text"]})

    # BUG FIX: ChatCompletion requires a chat model. "text-davinci-002" is a
    # legacy *completions* model and is rejected by this endpoint.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages
    )

    # BUG FIX: chat responses carry the reply under choices[0].message.content;
    # choices[0]["text"] only exists on the completions API and raised KeyError here.
    system_message = response["choices"][0]["message"]["content"]

    # Speak the assistant's reply aloud through the local TTS engine
    # (blocks until playback finishes).
    engine = pyttsx3.init()
    engine.say(system_message)
    engine.runAndWait()

    messages.append({"role": "assistant", "content": system_message})

    # Render every non-system turn; join() avoids quadratic += concatenation.
    return "".join(
        f"{message['role']}: {message['content']}\n\n"
        for message in messages
        if message['role'] != 'system'
    )
|
|
|
|
|
# Wire up the web UI: microphone recording -> decipher() -> text transcript.
microphone_input = gr.Audio(source="microphone", type="filepath")
interface = gr.Interface(fn=decipher, inputs=microphone_input, outputs="text")
interface.launch()
|
|