skkjodhpur commited on
Commit
a270145
·
verified ·
1 Parent(s): 3a7cfc3

Final Update

Browse files
Files changed (1) hide show
  1. app.py +26 -23
app.py CHANGED
@@ -12,32 +12,35 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
12
  model = model.to(device)
13
 
14
  def generate_text(prompt):
15
- # Tokenize input
16
- input_ids = tokenizer.encode(f"<s>[INST] {prompt} [/INST]", return_tensors="pt").to(device)
17
-
18
- # Generate text
19
- with torch.no_grad():
20
- output = model.generate(
21
- input_ids,
22
- max_length=200,
23
- num_return_sequences=1,
24
- do_sample=True,
25
- temperature=0.7,
26
- )
27
-
28
- # Decode and return the generated text
29
- generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
30
- return generated_text
 
31
 
32
  # Gradio interface
33
  def chatbot_response(user_input):
34
  return generate_text(user_input)
35
 
36
- iface = gr.Interface(fn=chatbot_response,
37
- inputs="text",
38
- outputs="text",
39
- title="Doctors-Patient Chatbot",
40
- subtitle="Fine-Tuning GEMMA-2B for Doctor-Patient Interaction",
41
- description="Ask me any question related to patient concerns. This model is designed for educational and informational purposes only. Please do not use it for medical diagnosis or treatment. Always consult a qualified healthcare provider for medical advice.")
 
 
42
 
43
- iface.launch(share=True)
 
12
  model = model.to(device)
13
 
14
def generate_text(prompt):
    """Generate a model response for a single patient question.

    The prompt is wrapped in the model's ``<s>[INST] ... [/INST]`` chat
    template, tokenized, and decoded back to plain text.

    Parameters
    ----------
    prompt : str or None
        Raw user input from the Gradio textbox.

    Returns
    -------
    str
        The decoded model output, or a human-readable message when the
        input is empty/invalid or generation fails.
    """
    # Guard against None as well as whitespace-only input: Gradio can pass
    # None when the textbox is cleared, and None.strip() would raise.
    if not prompt or not prompt.strip():
        return "Please enter a valid question."

    try:
        input_ids = tokenizer.encode(
            f"<s>[INST] {prompt} [/INST]", return_tensors="pt"
        ).to(device)
        with torch.no_grad():
            output = model.generate(
                input_ids,
                # max_new_tokens bounds only the generated continuation.
                # The previous max_length=200 counted the prompt tokens
                # too, so long prompts produced truncated or empty output.
                max_new_tokens=200,
                num_return_sequences=1,
                do_sample=True,
                temperature=0.7,
            )
        return tokenizer.decode(output[0], skip_special_tokens=True)
    except Exception as e:
        # UI boundary: surface the failure as text rather than crashing
        # the Gradio worker.
        return f"An error occurred: {str(e)}"
32
 
33
  # Gradio interface
34
def chatbot_response(user_input):
    """Gradio callback: forward the user's message to generate_text.

    Exists so the Interface wiring has a stable, named entry point.
    """
    reply = generate_text(user_input)
    return reply
36
 
37
# Gradio wiring: simple text-in / text-out chatbot UI.
# NOTE: gr.Interface has no `subtitle` parameter — passing one raises
# TypeError at startup. The subtitle text is folded into `description`.
iface = gr.Interface(
    fn=chatbot_response,
    inputs="text",
    outputs="text",
    title="Doctors-Patient Chatbot",
    description=(
        "Fine-Tuning GEMMA-2B for Doctor-Patient Interaction. "
        "Ask me any question related to patient concerns. This model is "
        "designed for educational and informational purposes only. Please "
        "do not use it for medical diagnosis or treatment. Always consult "
        "a qualified healthcare provider for medical advice."
    ),
)

# share=True requests a public tunnel link; ignored on hosted Spaces.
iface.launch(share=True)