Update app.py
app.py CHANGED
@@ -1,9 +1,9 @@
 import streamlit as st
-from transformers import AutoTokenizer
+#from transformers import AutoTokenizer
 from llama_cpp import Llama
 #from transformers import pipeline
 #from peft import PeftModel, PeftConfig
-from transformers import AutoModelForCausalLM
+#from transformers import AutoModelForCausalLM
 from datasets import load_dataset
 
 # Replace with the direct image URL
@@ -45,14 +45,17 @@ st.markdown(
 # Add the blurred background div
 st.markdown('<div class="blurred-background"></div>', unsafe_allow_html=True)
 
-
-
+""""""""""""""""""""""""" Application Code Starts here """""""""""""""""""""""""""""""""""""""""""""
+
+# Path to the GGUF model on Hugging Face (Hugging Face Spaces automatically downloads it)
+MODEL_PATH = "/root/.cache/huggingface/hub/models--QuantFactory--Mental-Health-FineTuned-Mistral-7B-Instruct-v0.2-GGUF/blobs/"
 
 # Load Llama model
 @st.cache_resource
 def load_llama_model():
     try:
-
+        # The GGUF model will be cached automatically by llama.cpp
+        return Llama(model_path=f"{MODEL_PATH}/model.gguf", n_threads=8)  # Adjust n_threads based on your environment
     except Exception as e:
         st.error(f"Error loading model: {e}")
         return None
@@ -66,16 +69,16 @@ def load_counseling_dataset():
 
 dataset = load_counseling_dataset()
 
-# Streamlit
+# Streamlit App
 st.title("Mental Health Counseling Chat")
 st.markdown("""
-Welcome to the Mental Health Counseling Chat
-This platform is designed to provide supportive, positive, and encouraging responses based on mental health counseling expertise.
+Welcome to the **Mental Health Counseling Chat Application**.
+This platform is designed to provide **supportive, positive, and encouraging responses** based on mental health counseling expertise.
 """)
 
 # Check if the model loaded correctly
 if llama_model is None:
-    st.error("The text generation model could not be loaded. Please check
+    st.error("The text generation model could not be loaded. Please check your configuration.")
 else:
     # Explore dataset for additional context or resources (optional)
     if st.checkbox("Show Example Questions and Answers from Dataset"):
@@ -86,7 +89,7 @@ else:
     st.markdown("---")
 
     # User input for mental health concerns
-    user_input = st.text_area("Your question or concern:", placeholder="Type here...")
+    user_input = st.text_area("Your question or concern:", placeholder="Type your question here...")
 
     if st.button("Get Supportive Response"):
         if user_input.strip():
@@ -110,4 +113,4 @@ else:
     - [National Suicide Prevention Lifeline](https://suicidepreventionlifeline.org)
     """)
 
-    st.sidebar.info("This application is not a replacement for professional counseling. If you are in crisis, please seek professional help immediately.")
+    st.sidebar.info("This application is not a replacement for professional counseling. If you are in crisis, please seek professional help immediately.")