Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -2,7 +2,8 @@ from transformers import pipeline
|
|
2 |
import gradio as gr
|
3 |
from gtts import gTTS
|
4 |
from openai import OpenAI
|
5 |
-
|
|
|
6 |
|
7 |
# Load the Whisper model for speech-to-text
|
8 |
pipe = pipeline(model="openai/whisper-small")
|
@@ -10,7 +11,8 @@ pipe = pipeline(model="openai/whisper-small")
|
|
10 |
# Load the text generation model
|
11 |
# text_pipe = pipeline("text2text-generation", model="google/flan-t5-base")
|
12 |
|
13 |
-
def generate_gpt_response(text):
    """Send *text* to the OpenAI chat API and return the assistant's reply.

    Parameters
    ----------
    text : str
        User prompt, forwarded verbatim as a single chat message.

    Returns
    -------
    str
        The reply text of the first completion choice.

    Raises
    ------
    openai.OpenAIError
        On authentication or network failure.
    """
    # FIX: the original referenced a global `client` that was never defined
    # (NameError at call time). Build the client here; OpenAI() reads the
    # OPENAI_API_KEY environment variable by default.
    client = OpenAI()
    response = client.chat.completions.create(
        model='gpt-3.5-turbo-0125',
        messages=[{"role": "user", "content": text}]
    )
    return response.choices[0].message.content
|
20 |
|
21 |
-
def transcribe(audio):
|
22 |
# Transcribe the audio to text
|
23 |
text = pipe(audio)["text"]
|
24 |
|
25 |
# Generate a response from the transcribed text
|
26 |
# lm_response = text_pipe(text)[0]["generated_text"]
|
27 |
-
lm_response = generate_gpt_response(text)
|
28 |
# Convert the response text to speech
|
29 |
tts = gTTS(lm_response, lang='ko')
|
30 |
|
@@ -37,7 +39,10 @@ def transcribe(audio):
|
|
37 |
# Create the Gradio interface
|
38 |
iface = gr.Interface(
|
39 |
fn=transcribe,
|
40 |
-
inputs=
|
|
|
|
|
|
|
41 |
outputs=gr.Audio(type="filepath"),
|
42 |
title="Whisper Small Glaswegian",
|
43 |
description="Realtime demo for Glaswegian speech recognition using a fine-tuned Whisper small model."
|
|
|
2 |
import gradio as gr
|
3 |
from gtts import gTTS
|
4 |
from openai import OpenAI
|
5 |
+
|
6 |
+
|
7 |
|
8 |
# Load the Whisper model for speech-to-text
|
9 |
pipe = pipeline(model="openai/whisper-small")
|
|
|
11 |
# Load the text generation model
|
12 |
# text_pipe = pipeline("text2text-generation", model="google/flan-t5-base")
|
13 |
|
14 |
+
def generate_gpt_response(text, api_key, model='gpt-3.5-turbo-0125'):
    """Generate a chat completion for *text* using the caller's OpenAI key.

    Parameters
    ----------
    text : str
        User prompt, forwarded verbatim as a single chat message.
    api_key : str
        OpenAI API key supplied by the user at request time (e.g. from the
        Gradio password textbox).
    model : str, optional
        Chat model name. Defaults to the previously hard-coded
        ``gpt-3.5-turbo-0125`` so existing callers are unaffected.

    Returns
    -------
    str
        The reply text of the first completion choice.

    Raises
    ------
    openai.OpenAIError
        On authentication or network failure.
    """
    # A fresh client per call keeps the user-supplied key request-scoped;
    # nothing is cached at module level.
    client = OpenAI(api_key=api_key)
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": text}]
    )
    return response.choices[0].message.content
|
22 |
|
23 |
+
def transcribe(audio, api_key):
|
24 |
# Transcribe the audio to text
|
25 |
text = pipe(audio)["text"]
|
26 |
|
27 |
# Generate a response from the transcribed text
|
28 |
# lm_response = text_pipe(text)[0]["generated_text"]
|
29 |
+
lm_response = generate_gpt_response(text, api_key)
|
30 |
# Convert the response text to speech
|
31 |
tts = gTTS(lm_response, lang='ko')
|
32 |
|
|
|
39 |
# Create the Gradio interface
|
40 |
iface = gr.Interface(
|
41 |
fn=transcribe,
|
42 |
+
inputs=[
|
43 |
+
gr.Audio(type="filepath"),
|
44 |
+
gr.Textbox(label="OpenAI API Key", type="password") # Add a textbox for the API key
|
45 |
+
],
|
46 |
outputs=gr.Audio(type="filepath"),
|
47 |
title="Whisper Small Glaswegian",
|
48 |
description="Realtime demo for Glaswegian speech recognition using a fine-tuned Whisper small model."
|