import nltk
from nltk.stem.lancaster import LancasterStemmer
import numpy as np
import tflearn
import tensorflow
import random
import json
import pickle
import gradio as gr
from nltk.tokenize import word_tokenize
# Ensure necessary NLTK resources are downloaded
try:
    nltk.data.find('tokenizers/punkt')
except LookupError:
    nltk.download('punkt')
# Initialize the stemmer
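# The Lancaster stemmer reduces words to short root forms so that different
# inflections of the same word map onto a single vocabulary entry.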
stemmer = LancasterStemmer()
# Load intents.json
try:
    with open("intents.json") as file:
        data = json.load(file)
except FileNotFoundError:
    raise FileNotFoundError("Error: 'intents.json' file not found. Ensure it exists in the current directory.")
# Load preprocessed data from pickle
try:
    with open("data.pickle", "rb") as f:
        words, labels, training, output = pickle.load(f)
except FileNotFoundError:
    raise FileNotFoundError("Error: 'data.pickle' file not found. Ensure it exists and matches the model.")
# Build the model structure (must match the architecture used during training)
net = tflearn.input_data(shape=[None, len(training[0])])  # input layer sized to the bag-of-words vector
net = tflearn.fully_connected(net, 8)  # first hidden layer
net = tflearn.fully_connected(net, 8)  # second hidden layer
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")  # softmax over intent tags
net = tflearn.regression(net)
# Load the trained model
model = tflearn.DNN(net)
try:
    model.load("MentalHealthChatBotmodel.tflearn")
except FileNotFoundError:
    print("Error: Trained model file not found. Ensure 'MentalHealthChatBotmodel.tflearn' exists.")
# Function to process user input into a bag-of-words format
def bag_of_words(s, words):
    bag = [0 for _ in range(len(words))]
    s_words = word_tokenize(s)
    # Stem every token before matching; the vocabulary in `words` is assumed to
    # already be stemmed, so filtering on the raw token would discard valid hits.
    s_words = [stemmer.stem(word.lower()) for word in s_words]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)
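# Illustrative usage (assumes the listed vocabulary tokens survive stemming unchanged):
#   bag_of_words("I feel sad", ["feel", "help", "sad"]) -> array([1, 0, 1])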
# Chat function
def chat(message, history):
    history = history or []
    message = message.lower()
    try:
        # Predict the intent tag for the message
        results = model.predict([bag_of_words(message, words)])
        results_index = np.argmax(results)
        tag = labels[results_index]
        # Match the tag with an intent and choose a random response
        for tg in data["intents"]:
            if tg['tag'] == tag:
                responses = tg['responses']
                response = random.choice(responses)
                break
        else:
            response = "I'm sorry, I didn't understand that. Could you please rephrase?"
    except Exception as e:
        response = f"An error occurred: {str(e)}"
    history.append((message, response))
    return history, history
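# chat() returns the history twice: once for the Chatbot component to display and
# once for the "state" output, which Gradio feeds back in as the next call's history.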
# Gradio interface
chatbot = gr.Chatbot(label="Chat")
css = """
footer {display:none !important}
.output-markdown{display:none !important}
.gr-button-primary {
z-index: 14;
height: 43px;
width: 130px;
left: 0px;
top: 0px;
padding: 0px;
cursor: pointer !important;
background: none rgb(17, 20, 45) !important;
border: none !important;
text-align: center !important;
font-family: Poppins !important;
font-size: 14px !important;
font-weight: 500 !important;
color: rgb(255, 255, 255) !important;
line-height: 1 !important;
border-radius: 12px !important;
transition: box-shadow 200ms ease 0s, background 200ms ease 0s !important;
box-shadow: none !important;
}
.gr-button-primary:hover{
z-index: 14;
height: 43px;
width: 130px;
left: 0px;
top: 0px;
padding: 0px;
cursor: pointer !important;
background: none rgb(37, 56, 133) !important;
border: none !important;
text-align: center !important;
font-family: Poppins !important;
font-size: 14px !important;
font-weight: 500 !important;
color: rgb(255, 255, 255) !important;
line-height: 1 !important;
border-radius: 12px !important;
transition: box-shadow 200ms ease 0s, background 200ms ease 0s !important;
box-shadow: rgb(0 0 0 / 23%) 0px 1px 7px 0px !important;
}
.hover\:bg-orange-50:hover {
--tw-bg-opacity: 1 !important;
background-color: rgb(229,225,255) !important;
}
div[data-testid="user"] {
background-color: #253885 !important;
}
.h-\[40vh\]{
height: 70vh !important;
}
"""
demo = gr.Interface(
    chat,
    [gr.Textbox(lines=1, label="Message"), "state"],
    [chatbot, "state"],
    allow_flagging="never",
    title="Mental Health Bot | Data Science Dojo",
    css=css
)
# Launch Gradio interface
if __name__ == "__main__":
    demo.launch()