Final Update
app.py CHANGED
@@ -12,32 +12,35 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 model = model.to(device)
 
 def generate_text(prompt):
+    if not prompt.strip():
+        return "Please enter a valid question."
+
+    try:
+        input_ids = tokenizer.encode(f"<s>[INST] {prompt} [/INST]", return_tensors="pt").to(device)
+        with torch.no_grad():
+            output = model.generate(
+                input_ids,
+                max_length=200,
+                num_return_sequences=1,
+                do_sample=True,
+                temperature=0.7,
+            )
+        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
+        return generated_text
+    except Exception as e:
+        return f"An error occurred: {str(e)}"
 
 # Gradio interface
 def chatbot_response(user_input):
     return generate_text(user_input)
 
+iface = gr.Interface(
+    fn=chatbot_response,
+    inputs="text",
+    outputs="text",
+    title="Doctors-Patient Chatbot",
+    subtitle="Fine-Tuning GEMMA-2B for Doctor-Patient Interaction",
+    description="Ask me any question related to patient concerns. This model is designed for educational and informational purposes only. Please do not use it for medical diagnosis or treatment. Always consult a qualified healthcare provider for medical advice."
+)
 
+iface.launch(share=True)
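The hunk above assumes the earlier, unchanged part of app.py (lines 1-11, not shown in this diff) already imports torch and gradio and loads the tokenizer and model that generate_text references. A minimal sketch of that setup, assuming the GEMMA-2B checkpoint named in the app title (the actual app may load a fine-tuned variant instead):

import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed setup, not part of this commit: load the base model and move it to
# the GPU when available. The checkpoint id is a guess based on the app title.
model_id = "google/gemma-2b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)

One caveat on the new gr.Interface call: subtitle is not a documented gr.Interface parameter, so depending on the Gradio version it is either ignored with an unused-kwargs warning or rejected outright; folding that text into description would be the safer choice.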