Update app.py
app.py
CHANGED
@@ -11,42 +11,40 @@ from nltk.stem.lancaster import LancasterStemmer
 nltk.download('punkt')
 stemmer = LancasterStemmer()
 
-# Load intents
+# Load intents file
 with open("intents.json") as file:
     data = json.load(file)
 
- [old lines 18-49 removed: previous data preprocessing and "# Build the model" code; contents not preserved in this view]
+# Data preprocessing
+words, labels, docs_x, docs_y = [], [], [], []
+for intent in data["intents"]:
+    for pattern in intent["patterns"]:
+        wrds = nltk.word_tokenize(pattern)
+        words.extend(wrds)
+        docs_x.append(wrds)
+        docs_y.append(intent["tag"])
+    if intent["tag"] not in labels:
+        labels.append(intent["tag"])
+
+# Stem and sort words
+words = sorted(set(stemmer.stem(w.lower()) for w in words if w not in ["?", ".", ",", "!"]))
+labels = sorted(labels)
+
+# Create training data
+training, output = [], []
+out_empty = [0] * len(labels)
+
+for x, doc in enumerate(docs_x):
+    bag = [1 if stemmer.stem(w.lower()) in [stemmer.stem(word) for word in doc] else 0 for w in words]
+    output_row = out_empty[:]
+    output_row[labels.index(docs_y[x])] = 1
+    training.append(bag)
+    output.append(output_row)
+
+training, output = np.array(training), np.array(output)
+
+# Build and train the model
+tf.compat.v1.reset_default_graph()
 net = tflearn.input_data(shape=[None, len(training[0])])
 net = tflearn.fully_connected(net, 8)
 net = tflearn.fully_connected(net, 8)
@@ -54,22 +52,26 @@ net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
 net = tflearn.regression(net)
 
 model = tflearn.DNN(net)
+
 try:
     model.load("MentalHealthChatBotmodel.tflearn")
 except FileNotFoundError:
     model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
     model.save("MentalHealthChatBotmodel.tflearn")
 
-#
+# Function to preprocess user input
 def bag_of_words(s, words):
-    bag = [0
-    s_words =
+    bag = [0 for _ in range(len(words))]
+    s_words = nltk.word_tokenize(s)
+    s_words = [stemmer.stem(word.lower()) for word in s_words]
+
     for se in s_words:
         for i, w in enumerate(words):
             if w == se:
                 bag[i] = 1
     return np.array(bag)
 
+# Chat function
 def chat(message, history=None):
     history = history or []
     try:
@@ -77,6 +79,7 @@ def chat(message, history=None):
         results = model.predict([bag])
         results_index = np.argmax(results)
         tag = labels[results_index]
+
        for tg in data["intents"]:
            if tg['tag'] == tag:
                response = random.choice(tg['responses'])
@@ -93,7 +96,8 @@ demo = gr.Interface(
     fn=chat,
     inputs=[gr.Textbox(lines=1, label="Message"), gr.State()],
     outputs=[gr.Chatbot(label="Chat"), gr.State()],
-    allow_flagging="never"
+    allow_flagging="never",
+    title="Wellbeing for All | Generative AI Enthusiasts"
 )
 
 if __name__ == "__main__":
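
The new preprocessing loop reads intents.json and expects a top-level "intents" list whose entries carry "tag", "patterns", and "responses" keys. A minimal sketch of such a file, written from Python, is shown below; only the key names come from app.py, while the tags, patterns, and responses are invented for illustration.

# Sketch of an intents.json the preprocessing loop could consume.
# Key names ("intents", "tag", "patterns", "responses") are taken from app.py;
# the actual tags, patterns, and responses are made up for this example.
import json

sample_intents = {
    "intents": [
        {
            "tag": "greeting",
            "patterns": ["Hi", "Hello", "How are you?"],
            "responses": ["Hello! How are you feeling today?"]
        },
        {
            "tag": "goodbye",
            "patterns": ["Bye", "See you later"],
            "responses": ["Take care of yourself!"]
        }
    ]
}

with open("intents.json", "w") as f:
    json.dump(sample_intents, f, indent=2)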
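As a quick sanity check of the reworked bag_of_words helper: it returns a NumPy vector with one slot per vocabulary word, set to 1 wherever a stemmed token of the message matches. A minimal sketch, assuming the stemmed `words` vocabulary built during preprocessing is in scope:

# Sketch: inspect the vector bag_of_words builds for a short message.
# Assumes `words` is the stemmed, sorted vocabulary created in the preprocessing step above.
vec = bag_of_words("I feel anxious today", words)
print(vec.shape)       # (len(words),) -- one entry per vocabulary word
print(int(vec.sum()))  # how many vocabulary words appear in the message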