Update app.py
Browse files
app.py
CHANGED
@@ -134,27 +134,30 @@ def generate_response(transcribed_text):
|
|
134 |
)
|
135 |
return response.choices[0].message.content
|
136 |
|
137 |
-
|
138 |
def inference(text):
|
139 |
#client = openai.OpenAI(api_key='your_api_key')
|
140 |
response = client.audio.speech.create(
|
141 |
-
|
142 |
-
|
143 |
-
|
144 |
)
|
145 |
output_file = "tts_output.mp3"
|
146 |
response.stream_to_file(output_file)
|
147 |
return output_file
|
|
|
|
|
|
|
|
|
|
|
|
|
148 |
|
149 |
demo = gr.Interface(
|
150 |
-
|
151 |
-
|
152 |
-
|
153 |
-
|
154 |
-
|
155 |
-
|
156 |
-
|
157 |
-
|
158 |
-
)
|
159 |
-
|
160 |
-
demo.launch()
|
|
|
134 |
)
|
135 |
return response.choices[0].message.content
|
136 |
|
|
|
def inference(text):
    """Convert *text* to speech with the OpenAI TTS API and save it as MP3.

    Parameters
    ----------
    text : str
        The text to synthesize (here: the chat model's reply).

    Returns
    -------
    str
        Path of the generated audio file, ``"tts_output.mp3"``.
    """
    # `client` is the module-level OpenAI client configured elsewhere in app.py.
    response = client.audio.speech.create(
        model="tts-1",   # standard-quality TTS model
        voice="alloy",
        input=text,
    )
    output_file = "tts_output.mp3"
    # NOTE(review): `stream_to_file` is deprecated in openai>=1.x in favor of
    # `client.audio.speech.with_streaming_response.create(...)` — confirm the
    # pinned SDK version before changing.
    response.stream_to_file(output_file)
    return output_file
|
def process_audio_and_respond(audio):
    """Full voice-assistant pipeline: recorded audio in, answer out twice.

    Transcribes the recording, asks the chat model for a reply, then
    synthesizes that reply to speech.

    Returns a ``(reply_text, audio_file_path)`` tuple matching the two
    Gradio output components.
    """
    question = transcribe(audio)
    answer = generate_response(question)
    speech_path = inference(answer)
    return answer, speech_path
|
# Gradio UI: record a question with the microphone; the answer comes back
# both as text and as synthesized speech.
demo = gr.Interface(
    fn=process_audio_and_respond,
    inputs=gr.inputs.Audio(source="microphone", type="filepath", label="Bonyeza kitufe cha kurekodi na uliza swali lako"),
    outputs=[gr.outputs.Textbox(label="Jibu (kwa njia ya maandishi)"), gr.outputs.Audio(type="filepath", label="Jibu kwa njia ya sauti (Bofya kusikiliza Jibu)")],
    # Fixed typo in the user-facing Swahili text: "haki and sheria" -> "haki na sheria".
    description="Uliza Swali kuhusu haki na sheria za udongo",
    theme="compact",
    layout="vertical",
    allow_flagging=False,
    live=True,
)

# NOTE(review): gr.inputs / gr.outputs and the source= / theme= / layout=
# kwargs are the pre-3.0 Gradio API (removed in Gradio 3+). Confirm the pinned
# gradio version before modernizing to gr.Audio(sources=["microphone"], ...).

# Without this call the script builds the interface but never serves it when
# run directly. (Hugging Face Spaces also auto-launches `demo`; calling
# launch() explicitly is harmless there — confirm deployment target.)
demo.launch()
|
|
|
|