not-lain committed on
Commit
c4c3c57
1 Parent(s): 767253a

passing everything to vectara

Browse files
Files changed (1) hide show
  1. app.py +16 -8
app.py CHANGED
@@ -257,10 +257,18 @@ def process_summary_with_openai(summary):
257
  return str(e)
258
 
259
 
260
- def process_and_query(text=None):
261
  try:
262
- # augment the prompt before feeding it to vectara
263
- text = "the user asks the following to his health adviser " + text
 
 
 
 
 
 
 
 
264
 
265
  # Use the text to query Vectara
266
  vectara_response_json = query_vectara(text)
@@ -411,20 +419,20 @@ with gr.Blocks(theme='ParityError/Anime') as iface :
411
  input_language = gr.Dropdown(languages, label="select the language",value="English",interactive=True)
412
  audio_input = gr.Audio(label="speak",type="filepath",sources="microphone")
413
  audio_output = gr.Markdown(label="output text")
414
- audio_button = gr.Button("process audio")
415
- audio_button.click(process_speech, inputs=[input_language,audio_input], outputs=audio_output)
416
  gr.Examples([["English","sample_input.mp3"]],inputs=[input_language,audio_input])
417
  with gr.Accordion("image identification",open=True):
418
  image_input = gr.Image(label="upload image")
419
  image_output = gr.Markdown(label="output text")
420
- image_button = gr.Button("process image")
421
- image_button.click(process_image, inputs=image_input, outputs=image_output)
422
  gr.Examples(["sick person.jpeg"],inputs=[image_input])
423
  with gr.Accordion("text summarization",open=True):
424
  text_input = gr.Textbox(label="input text",lines=5)
425
  text_output = gr.Markdown(label="output text")
426
  text_button = gr.Button("process text")
427
- text_button.click(process_and_query, inputs=text_input, outputs=text_output)
428
  gr.Examples([
429
  ["What is the proper treatment for buccal herpes?"],
430
  ["Male, 40 presenting with swollen glands and a rash"],
 
257
  return str(e)
258
 
259
 
260
+ def process_and_query(input_language=None,audio_input=None,image_input=None,text_input=None):
261
  try:
262
+ if text is not None :
263
+ # augment the prompt before feeding it to vectara
264
+ text = "the user asks the following to his health adviser " + text
265
+ # process audio
266
+ if audio_input is not None :
267
+ text += "\n"+process_speech(input_language,audio_input)
268
+ # process image
269
+ if image_input is not None :
270
+ text += "\n"+process_image(image_input)
271
+
272
 
273
  # Use the text to query Vectara
274
  vectara_response_json = query_vectara(text)
 
419
  input_language = gr.Dropdown(languages, label="select the language",value="English",interactive=True)
420
  audio_input = gr.Audio(label="speak",type="filepath",sources="microphone")
421
  audio_output = gr.Markdown(label="output text")
422
+ # audio_button = gr.Button("process audio")
423
+ # audio_button.click(process_speech, inputs=[input_language,audio_input], outputs=audio_output)
424
  gr.Examples([["English","sample_input.mp3"]],inputs=[input_language,audio_input])
425
  with gr.Accordion("image identification",open=True):
426
  image_input = gr.Image(label="upload image")
427
  image_output = gr.Markdown(label="output text")
428
+ # image_button = gr.Button("process image")
429
+ # image_button.click(process_image, inputs=image_input, outputs=image_output)
430
  gr.Examples(["sick person.jpeg"],inputs=[image_input])
431
  with gr.Accordion("text summarization",open=True):
432
  text_input = gr.Textbox(label="input text",lines=5)
433
  text_output = gr.Markdown(label="output text")
434
  text_button = gr.Button("process text")
435
+ text_button.click(process_and_query, inputs=[input_language,audio_input,image_input,text_input], outputs=text_output)
436
  gr.Examples([
437
  ["What is the proper treatment for buccal herpes?"],
438
  ["Male, 40 presenting with swollen glands and a rash"],