# neural-chatbot / test.py
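"""Interactive test script for the chatbot model.

The model scores every line of responses.txt for a given user input, and the
highest-scoring line is returned as the bot's reply.
"""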
import numpy as np
from keras.saving import load_model
from keras.preprocessing.text import Tokenizer
from keras_self_attention import SeqSelfAttention
from model_settings import *
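# model_settings is assumed to define inp_len, the fixed input sequence length used below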
# each line of responses.txt is one candidate reply; the model predicts which line to answer with
with open("responses.txt", "r") as f:
    lines = [x.rstrip("\n") for x in f.readlines()]
tokenizer = Tokenizer()  # splits text into words and maps each word to an integer index (also lowercases by default)
tokenizer.fit_on_texts(lines)  # build the word index from the response lines
model = load_model("chatbot.keras", custom_objects={"SeqSelfAttention": SeqSelfAttention})  # the attention layer must be registered so Keras can deserialize it
def find_line_number(array):
    # return the index of the largest value, i.e. the number of the highest-scoring response line
    return sorted(zip(list(array), [x for x in range(len(array))]), key=lambda x: x[0], reverse=True)[0][1]
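# note: for a 1-D score array this is equivalent to the more idiomatic int(np.argmax(array))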
def generate(text, verbose=1):
    tokens = list(tokenizer.texts_to_sequences([text,])[0])  # encode the input text as a sequence of word indices
    tokens = (tokens + [0,] * inp_len)[:inp_len]  # pad with zeros, then truncate to exactly inp_len tokens
    prediction = model.predict(np.array([tokens,]), verbose=verbose)[0]  # one score per response line
    line = find_line_number(prediction)
    return lines[line]
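# example usage (assuming chatbot.keras and responses.txt are present in the working directory):
#   reply = generate("hello", verbose=0)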
if __name__ == "__main__":  # if this code is not being imported, open the chat
    while True:
        inp = input("User: ")
        print(f"Bot: {generate(inp)}")