Kr08 committed
Commit 9352d35 · verified · 1 Parent(s): 5364e22

Update app.py

Files changed (1):
  app.py  +9 -7
app.py CHANGED
@@ -31,7 +31,7 @@ def load_qa_model():
         logger.info(f"Q&A model loaded successfully")
         return qa_pipeline
     except Exception as e:
-        logger.warning(f"Failed to load Q&A model. Error: {str(e)}")
+        logger.warning(f"Failed to load Q&A model. Error: \n{str(e)}")
         return None
 
 
@@ -43,7 +43,7 @@ def load_summarization_model():
         logger.info(f"Summarization model loaded successfully on {'GPU' if cuda_available else 'CPU'}")
         return summarizer
     except Exception as e:
-        logger.warning(f"Failed to load summarization model on GPU. Falling back to CPU. Error: {str(e)}")
+        logger.warning(f"Failed to load summarization model on GPU. Falling back to CPU. Error: \n{str(e)}")
         summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6", device=-1)
         logger.info("Summarization model loaded successfully on CPU")
         return summarizer
@@ -121,6 +121,7 @@ def process_and_summarize(audio_file, translate, model_size, do_summarize=True):
         raise gr.Error(f"Processing failed: {str(e)}")
 
 
+
 @spaces.GPU(duration=60)
 def answer_question(context, question):
     logger.info("Starting Q&A process")
@@ -135,18 +136,19 @@ def answer_question(context, question):
         ]
 
         outputs = qa_pipeline(messages, max_new_tokens=256)
-        answer = outputs[0]["generated_text"]
 
         # Extract the answer from the generated text
-        answer = answer.split("assistant:")[-1].strip()
+        full_response = outputs[0]["generated_text"]
+
+        # Split the response to get only the assistant's reply
+        assistant_response = full_response.split("assistant:")[-1].strip() if "assistant:" in full_response else full_response
 
         logger.info("Q&A process completed successfully")
-        return answer
+        return assistant_response
     except Exception as e:
         logger.error(f"Q&A process failed: {str(e)}")
         logger.error(traceback.format_exc())
-        return "Error occurred during Q&A process. Please try again."
+        return f"Error occurred during Q&A process. Please try again. Error: \n{str(e)}"
 
-
 # Main interface
 with gr.Blocks() as iface:
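
For context on the two logging tweaks, here is a minimal sketch of the summarization loader this commit touches, reconstructed from the except branch visible in the hunk. The try-branch device selection and the cuda_available check are assumptions, since the diff shows only the fallback path.

import logging

import torch
from transformers import pipeline

logger = logging.getLogger(__name__)

def load_summarization_model():
    try:
        # Assumed try branch (not shown in the diff): pick the GPU when available.
        cuda_available = torch.cuda.is_available()
        summarizer = pipeline(
            "summarization",
            model="sshleifer/distilbart-cnn-12-6",
            device=0 if cuda_available else -1,
        )
        logger.info(f"Summarization model loaded successfully on {'GPU' if cuda_available else 'CPU'}")
        return summarizer
    except Exception as e:
        # This commit only reformats the warning so the exception text
        # starts on its own line after "Error:".
        logger.warning(f"Failed to load summarization model on GPU. Falling back to CPU. Error: \n{str(e)}")
        summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6", device=-1)
        logger.info("Summarization model loaded successfully on CPU")
        return summarizer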
 
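The behavioural change sits in answer_question: the reply is pulled out of the generated text only when an "assistant:" marker is present, and failures now return the exception text to the caller. Below is a minimal sketch of that extraction step, using a hypothetical helper name and assuming outputs[0]["generated_text"] is a plain string; the pipeline setup itself is not shown in this diff.

def extract_assistant_reply(full_response: str) -> str:
    # Hypothetical helper mirroring the new inline logic: split only when the
    # marker is present, otherwise return the response unchanged.
    if "assistant:" in full_response:
        return full_response.split("assistant:")[-1].strip()
    return full_response

# Both forms yield a usable answer:
print(extract_assistant_reply("user: What is Gradio?\nassistant: A UI library."))  # -> "A UI library."
print(extract_assistant_reply("A UI library."))                                    # -> "A UI library."

Functionally, the old unconditional split already fell back to the whole string when no marker was present (str.split returns a one-element list), so the explicit check mainly makes the intent readable; the visible user-facing change is that the error branch now includes str(e) in the returned message.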