Spaces:
Runtime error
debugging audio
app.py
CHANGED
@@ -220,6 +220,7 @@ def convert_to_markdown(vectara_response_json):
 
 def process_and_query(text=None, image=None, audio=None):
     try:
+        print("text_value :", text)
         # If an image is provided, process it with OpenAI and use the response as the text query for Vectara
         if image is not None:
             text = process_image(image)
@@ -227,7 +228,7 @@ def process_and_query(text=None, image=None, audio=None):
         if audio is not None:
             text = process_speech(audio)
             # this should print in the log the text that was extracted from the audio
-            print("
+            print("process_speech_out : ", text)
 
         # Now, use the text (either provided by the user or obtained from OpenAI) to query Vectara
         vectara_response_json = query_vectara(text)
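The line removed in the second hunk, print(", was an unterminated string literal: a SyntaxError that crashed the Space at import and caused the runtime error shown above. This commit completes it and adds a second debug print at the top of the function. For context, a minimal sketch of how process_and_query reads after this commit, assuming process_image, process_speech, query_vectara, and convert_to_markdown are defined elsewhere in app.py as the hunk headers suggest; the except clause and the return value fall outside both hunks, so those parts are assumptions:

def process_and_query(text=None, image=None, audio=None):
    try:
        print("text_value :", text)
        # If an image is provided, process it with OpenAI and use the response as the text query for Vectara
        if image is not None:
            text = process_image(image)

        if audio is not None:
            text = process_speech(audio)
            # this should print in the log the text that was extracted from the audio
            print("process_speech_out : ", text)

        # Now, use the text (either provided by the user or obtained from OpenAI) to query Vectara
        vectara_response_json = query_vectara(text)
        # Assumption: the JSON response is rendered via the convert_to_markdown
        # helper named in the first hunk header
        return convert_to_markdown(vectara_response_json)
    except Exception as e:
        # Assumption: surface the failure in the Space logs instead of crashing
        print("process_and_query error:", e)
        return f"Error: {e}"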