Update app.py
app.py
CHANGED
@@ -1,6 +1,7 @@
 import streamlit as st
 from transformers import AutoTokenizer
-from transformers import pipeline
+from llama_cpp import Llama
+#from transformers import pipeline
 #from peft import PeftModel, PeftConfig
 from transformers import AutoModelForCausalLM
 from datasets import load_dataset
@@ -44,6 +45,20 @@ st.markdown(
 # Add the blurred background div
 st.markdown('<div class="blurred-background"></div>', unsafe_allow_html=True)
 
+# Path to your GGUF model file
+MODEL_PATH = "/path/to/QuantFactory/Mental-Health-FineTuned-Mistral-7B-Instruct-v0.2-GGUF"
+
+# Load Llama model
+@st.cache_resource
+def load_llama_model():
+    try:
+        return Llama(model_path=MODEL_PATH, n_threads=8)  # Adjust `n_threads` based on your system
+    except Exception as e:
+        st.error(f"Error loading model: {e}")
+        return None
+
+llama_model = load_llama_model()
+
 # Load dataset for context
 @st.cache_resource
 def load_counseling_dataset():
@@ -51,13 +66,6 @@ def load_counseling_dataset():
 
 dataset = load_counseling_dataset()
 
-# Load text-generation model
-@st.cache_resource
-def load_text_generation_model():
-    return pipeline("text-generation", model="EleutherAI/gpt-neo-1.3B")
-
-text_generator = load_text_generation_model()
-
 # Streamlit app
 st.title("Mental Health Counseling Chat")
 st.markdown("""
@@ -65,47 +73,41 @@ Welcome to the Mental Health Counseling Chat application.
 This platform is designed to provide supportive, positive, and encouraging responses based on mental health counseling expertise.
 """)
 
-# Check
-
-
-
-
-
-
-
+# Check if the model loaded correctly
+if llama_model is None:
+    st.error("The text generation model could not be loaded. Please check the model path and configuration.")
+else:
+    # Explore dataset for additional context or resources (optional)
+    if st.checkbox("Show Example Questions and Answers from Dataset"):
+        sample = dataset["train"].shuffle(seed=42).select(range(3))  # Display 3 random samples
+        for example in sample:
+            st.markdown(f"**Question:** {example.get('context', 'N/A')}")
+            st.markdown(f"**Answer:** {example.get('response', 'N/A')}")
+            st.markdown("---")
 
-#
-
-sample = dataset["train"].shuffle(seed=42).select(range(3))  # Display 3 random samples
-for example in sample:
-    st.markdown(f"**Question:** {example[question_col]}")
-    st.markdown(f"**Answer:** {example[answer_col]}")
-    st.markdown("---")
+    # User input for mental health concerns
+    user_input = st.text_area("Your question or concern:", placeholder="Type here...")
 
-
-user_input
+    if st.button("Get Supportive Response"):
+        if user_input.strip():
+            try:
+                # Generate response using Llama
+                prompt = f"User: {user_input}\nCounselor:"
+                response = llama_model(prompt, max_tokens=200, stop=["\n", "User:"])
+
+                st.subheader("Counselor's Response:")
+                st.write(response["choices"][0]["text"].strip())
+            except Exception as e:
+                st.error(f"An error occurred while generating the response: {e}")
+        else:
+            st.error("Please enter a question or concern to receive a response.")
 
-
-
-
-
-
-
-
-
-            st.subheader("Counselor's Response:")
-            st.write(counselor_reply)
-        except Exception as e:
-            st.error(f"An error occurred while generating the response: {e}")
-    else:
-        st.error("Please enter a question or concern to receive a response.")
-
-# Sidebar resources
-st.sidebar.header("Additional Mental Health Resources")
-st.sidebar.markdown("""
-- [Mental Health Foundation](https://www.mentalhealth.org)
-- [Mind](https://www.mind.org.uk)
-- [National Suicide Prevention Lifeline](https://suicidepreventionlifeline.org)
-""")
+    # Sidebar resources
+    st.sidebar.header("Additional Mental Health Resources")
+    st.sidebar.markdown("""
+    - [Mental Health Foundation](https://www.mentalhealth.org)
+    - [Mind](https://www.mind.org.uk)
+    - [National Suicide Prevention Lifeline](https://suicidepreventionlifeline.org)
+    """)
 
-st.sidebar.info("This application is not a replacement for professional counseling. If you are in crisis, please seek professional help immediately.")
+    st.sidebar.info("This application is not a replacement for professional counseling. If you are in crisis, please seek professional help immediately.")
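
A note on MODEL_PATH: the value added in this commit is a placeholder, and Llama(model_path=...) expects the path of an actual .gguf file on disk, not a repo name or directory. Below is a minimal sketch of one way to fetch a quantized file from the Hub with huggingface_hub and load it; the FILENAME value is a hypothetical quant name, not taken from the commit, so check it against the repo's file list.

# Assumption: resolve the model file via the Hub cache instead of a hard-coded path.
# FILENAME is a guessed quant name (hypothetical); verify it in the QuantFactory repo.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

REPO_ID = "QuantFactory/Mental-Health-FineTuned-Mistral-7B-Instruct-v0.2-GGUF"
FILENAME = "Mental-Health-FineTuned-Mistral-7B-Instruct-v0.2.Q4_K_M.gguf"

model_path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)  # downloads once, returns cached local path
llm = Llama(model_path=model_path, n_threads=8)  # mirrors the n_threads choice in the commit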
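
For reference on the generation call: llama-cpp-python's Llama instances are callable and return an OpenAI-style completion dict, which is why the new code reads response["choices"][0]["text"]. A small sketch of that call shape, reusing llm from the sketch above and an invented prompt; note that including "\n" in stop ends generation at the first newline, so replies stay single-paragraph.

# Sketch of the completion call shape used in the commit; the prompt is invented.
prompt = "User: I have trouble sleeping lately.\nCounselor:"
out = llm(
    prompt,
    max_tokens=200,
    stop=["\n", "User:"],  # "\n" cuts the reply at the first newline
)
print(out["choices"][0]["text"].strip())  # completion text lives in an OpenAI-style dict

Keeping only "User:" as a stop sequence would permit multi-paragraph replies, at the cost of occasionally longer, less focused output.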