import json
import random
import pickle

import gradio as gr
import numpy as np
import nltk
from nltk.stem import WordNetLemmatizer
from tensorflow.keras.models import load_model

# Tokenizer and lemmatizer data NLTK needs at runtime.
# (Newer NLTK releases may also require nltk.download('punkt_tab').)
nltk.download('punkt')
nltk.download('wordnet')

lemmatizer = WordNetLemmatizer()

# Load the intents file plus the vocabulary, class list, and trained model
# produced by the training script.
new_intents = json.loads(open('intents.json').read())
words = pickle.load(open('words.pkl', 'rb'))
classes = pickle.load(open('classes.pkl', 'rb'))
model = load_model("chatbot_final.h5")
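
# Illustrative shape of intents.json assumed by this script (hypothetical values):
# each intent needs at least a "tag" and a list of "responses"; a "patterns"
# field is typically present for training but is not read here.
# {"intents": [{"tag": "greeting", "responses": ["Hello!", "Hi there!"]}]}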


def clean_up_sentence(sentence):
    """Tokenize the sentence and lemmatize each word."""
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [lemmatizer.lemmatize(word) for word in sentence_words]
    return sentence_words


def bag_of_words(sentence):
    """Encode a sentence as a binary bag-of-words vector over the training vocabulary."""
    sentence_words = clean_up_sentence(sentence)
    bag = [0] * len(words)
    for w in sentence_words:
        for i, word in enumerate(words):
            if word == w:
                bag[i] = 1
    return np.array(bag)


def predict_class(sentence):
    """Predict intent classes for a sentence, sorted by probability."""
    bow = bag_of_words(sentence)
    res = model.predict(np.array([bow]))[0]
    ERROR_THRESHOLD = 0.25
    results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]

    results.sort(key=lambda x: x[1], reverse=True)
    return_list = []
    for r in results:
        return_list.append({'intent': classes[r[0]], 'probability': str(r[1])})

    # Fall back to a default intent when no class clears the threshold.
    if not return_list:
        return_list.append({'intent': 'default', 'probability': '1.0'})

    return return_list


def get_response(intents_list, intents_json):
    """Pick a random response for the top predicted intent."""
    tag = intents_list[0]['intent']
    list_of_intents = intents_json['intents']
    # Default reply used when no intent tag matches (wording is illustrative).
    result = "Sorry, I didn't understand that."
    for i in list_of_intents:
        if i['tag'] == tag:
            result = random.choice(i['responses'])
            break
    return result


def greet(message):
    """Gradio callback: classify the user's message and return a reply."""
    ints = predict_class(message)
    res = get_response(ints, new_intents)
    return res


# Simple Gradio text-in / text-out interface around the chatbot.
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()
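
# To sanity-check the model without the web UI, you could call greet() directly
# before launching, e.g. print(greet("Hello")) with any sample message
# (illustrative only; launch() blocks once the server is running).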