tahirsher committed
Commit 3fa17a3 (verified) · parent: 7fe9a0d

Update app.py

Files changed (1)
  1. app.py +41 -37
app.py CHANGED
@@ -11,42 +11,40 @@ from nltk.stem.lancaster import LancasterStemmer
 nltk.download('punkt')
 stemmer = LancasterStemmer()
 
-# Load intents
+# Load intents file
 with open("intents.json") as file:
     data = json.load(file)
 
-# Load or regenerate data.pickle
-try:
-    with open("data.pickle", "rb") as f:
-        words, labels, training, output = pickle.load(f)
-except FileNotFoundError:
-    words, labels, docs_x, docs_y = [], [], [], []
-    for intent in data["intents"]:
-        for pattern in intent["patterns"]:
-            wrds = nltk.word_tokenize(pattern)
-            words.extend(wrds)
-            docs_x.append(wrds)
-            docs_y.append(intent["tag"])
-            if intent["tag"] not in labels:
-                labels.append(intent["tag"])
-
-    words = sorted(set(stemmer.stem(w.lower()) for w in words if w not in ["?", ".", ",", "!"]))
-    labels = sorted(labels)
-
-    training, output = [], []
-    out_empty = [0] * len(labels)
-    for x, doc in enumerate(docs_x):
-        bag = [1 if stemmer.stem(w.lower()) in [stemmer.stem(word) for word in doc] else 0 for w in words]
-        output_row = out_empty[:]
-        output_row[labels.index(docs_y[x])] = 1
-        training.append(bag)
-        output.append(output_row)
-
-    training, output = np.array(training), np.array(output)
-    with open("data.pickle", "wb") as f:
-        pickle.dump((words, labels, training, output), f)
-
-# Build the model
+# Data preprocessing
+words, labels, docs_x, docs_y = [], [], [], []
+for intent in data["intents"]:
+    for pattern in intent["patterns"]:
+        wrds = nltk.word_tokenize(pattern)
+        words.extend(wrds)
+        docs_x.append(wrds)
+        docs_y.append(intent["tag"])
+        if intent["tag"] not in labels:
+            labels.append(intent["tag"])
+
+# Stem and sort words
+words = sorted(set(stemmer.stem(w.lower()) for w in words if w not in ["?", ".", ",", "!"]))
+labels = sorted(labels)
+
+# Create training data
+training, output = [], []
+out_empty = [0] * len(labels)
+
+for x, doc in enumerate(docs_x):
+    bag = [1 if stemmer.stem(w.lower()) in [stemmer.stem(word) for word in doc] else 0 for w in words]
+    output_row = out_empty[:]
+    output_row[labels.index(docs_y[x])] = 1
+    training.append(bag)
+    output.append(output_row)
+
+training, output = np.array(training), np.array(output)
+
+# Build and train the model
+tf.compat.v1.reset_default_graph()
 net = tflearn.input_data(shape=[None, len(training[0])])
 net = tflearn.fully_connected(net, 8)
 net = tflearn.fully_connected(net, 8)
@@ -54,22 +52,26 @@ net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
 net = tflearn.regression(net)
 
 model = tflearn.DNN(net)
+
 try:
     model.load("MentalHealthChatBotmodel.tflearn")
 except FileNotFoundError:
     model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
     model.save("MentalHealthChatBotmodel.tflearn")
 
-# Define chat function
+# Function to preprocess user input
 def bag_of_words(s, words):
-    bag = [0] * len(words)
-    s_words = [stemmer.stem(w.lower()) for w in nltk.word_tokenize(s)]
+    bag = [0 for _ in range(len(words))]
+    s_words = nltk.word_tokenize(s)
+    s_words = [stemmer.stem(word.lower()) for word in s_words]
+
     for se in s_words:
         for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)
 
+# Chat function
 def chat(message, history=None):
     history = history or []
     try:
@@ -77,6 +79,7 @@ def chat(message, history=None):
         results = model.predict([bag])
         results_index = np.argmax(results)
         tag = labels[results_index]
+
        for tg in data["intents"]:
            if tg['tag'] == tag:
                response = random.choice(tg['responses'])
@@ -93,7 +96,8 @@ demo = gr.Interface(
    fn=chat,
    inputs=[gr.Textbox(lines=1, label="Message"), gr.State()],
    outputs=[gr.Chatbot(label="Chat"), gr.State()],
-    allow_flagging="never"
+    allow_flagging="never",
+    title="Wellbeing for All | Generative AI Enthusiasts"
 )
 
 if __name__ == "__main__":
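For context when reviewing the change: app.py reads an intents.json that is not included in this commit, and the input to model.predict() is the binary bag-of-words vector built by bag_of_words(). The sketch below is illustrative only; it shows one plausible shape for intents.json (matching the keys the script accesses: data["intents"], intent["tag"], intent["patterns"], tg['responses']) and a simplified, dependency-light version of the encoding that uses str.split() in place of nltk.word_tokenize and omits the Lancaster stemming.

# Illustrative sketch only: the repository's real intents.json is not shown in
# this commit, and the tokenizer here is a simplified stand-in for NLTK.
import numpy as np

example_intents = {
    "intents": [
        {
            "tag": "greeting",
            "patterns": ["Hi", "Hello there", "How are you"],
            "responses": ["Hello! How can I support you today?"],
        },
        {
            "tag": "sadness",
            "patterns": ["I feel sad", "I am feeling down"],
            "responses": ["I'm sorry to hear that. Would you like to talk about it?"],
        },
    ]
}

# Build a toy vocabulary the same way app.py does, minus stemming.
words = sorted(
    {
        w.lower()
        for intent in example_intents["intents"]
        for pattern in intent["patterns"]
        for w in pattern.split()
    }
)

def bag_of_words(sentence, words):
    # Binary vector marking which vocabulary entries occur in the input,
    # mirroring the feature vector that app.py feeds to model.predict().
    tokens = [w.lower() for w in sentence.split()]
    return np.array([1 if w in tokens else 0 for w in words])

print(bag_of_words("I feel sad today", words))

With stemming applied, surface variants such as "feel" and "feeling" collapse to one vocabulary entry, which is why app.py runs LancasterStemmer over both the training patterns and the user message before building the vector.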