ierhon committed on
Commit 6fceebb
1 Parent(s): e75ea50

Update train.py

Files changed (1)
  1. train.py +23 -1
train.py CHANGED
@@ -6,6 +6,7 @@ from keras.layers import Embedding, Dense, Dropout, Flatten, PReLU
 from keras.preprocessing.text import Tokenizer
 from keras_self_attention import SeqSelfAttention, SeqWeightedAttention
 
+
 with open("dataset.json", "r") as f: # TODO: move the outputs into a separate file, so it would be "key": 0, "key2": 1 etc
     dset = json.load(f)
 
@@ -28,4 +29,25 @@ model.add(Dense(512, activation="relu"))
 model.add(Dense(256, activation="relu"))
 model.add(Dense(dset_size, activation="linear")) # TBH it doesn't matter that much which activation function to use here; linear does nothing at all to the output. It might end up being something like softmax, but I'll test that later
 
-model.save("chatbot.keras") # It's obvious what it does, saves the model to a file
+X = [] # we're loading the training data into input X
+y = [] # and output y
+
+for line, key in enumerate(dset):
+    tokens = tokenizer.tokenize(key)
+    X.append(np.array((list(tokens)+[0,]*inp_len)[:inp_len])) # refusing to use pad_sequences for an unspecified reason and creating the worst line of code
+    output_array = np.zeros(dset_size)
+    output_array[line] = 1 # 0 0 0 1 0 0 0 0 0, the neuron for each line activates in the correct response
+    y.append(output_array)
+
+X = np.array(X) # normal lists are way slower than numpy arrays (remember, a list and an array are not the same thing, an array is far more limited)
+y = np.array(y) # that's why keras supports only numpy arrays ^
+
+model.compile(optimizer=Adam(), loss="mse", metrics=["accuracy",]) # kind of like settings for the training
+# TODO: change the loss
+
+model.fit(X, y, epochs=10, batch_size=8) # training the model; epochs is how many times it reads the data, batch_size is an optimization to train on multiple messages at the same time. Loss and accuracy are opposites: loss is how far the output is from the correct one (1 down to 0), accuracy is how often the model gets the answer right (0 up to 1)
+# Use workers=4, use_multiprocessing=True if you don't have a GPU
+
+model.summary() # just for you to see info about the model, useful because you can check the parameter count
+
+model.save("chatbot.keras")
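
The two follow-ups flagged in the comments above (using pad_sequences instead of the manual zero-padding slice, and the "TODO: change the loss") could look roughly like the sketch below. This is a minimal sketch, not part of the commit: it assumes tokenizer, inp_len, dset_size and dset are defined earlier in train.py as the diff implies, and it keeps the same tokenizer.tokenize call even though that method is only assumed to exist.

# Hypothetical sketch, not in the commit: pad_sequences padding plus a softmax/cross-entropy setup
import numpy as np
from keras.preprocessing.sequence import pad_sequences

token_lists = [tokenizer.tokenize(key) for key in dset]  # same tokenizer call as in the diff
X = pad_sequences(token_lists, maxlen=inp_len, padding="post", truncating="post")  # pad with zeros / cut to inp_len, like the manual slice above
y = np.eye(dset_size)[np.arange(len(dset))]  # one-hot targets, one row per dataset entry

# One-hot targets like these usually pair with a softmax output layer
# (Dense(dset_size, activation="softmax")) and categorical cross-entropy instead of linear + MSE:
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.fit(X, y, epochs=10, batch_size=8)

categorical_crossentropy is the standard pairing for one-hot targets like the ones this commit builds; the commit itself keeps the linear output and MSE for now.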