Update app.py
app.py
CHANGED
@@ -12,6 +12,7 @@ def train(data: str, message: str):
     if "→" not in data and "\n" not in data:
         return "Dataset should be like:\nquestion→answer\nquestion→answer\netc."
     dset, responses = todset(data)
+    resps_len = len(responses)
     tokenizer = Tokenizer()
     tokenizer.fit_on_texts(list(dset.keys()))
 
@@ -26,7 +27,7 @@ def train(data: str, message: str):
     model.add(Dense(512, activation="relu"))
     model.add(Dense(512, activation="relu"))
     model.add(Dense(256, activation="relu"))
-    model.add(Dense(
+    model.add(Dense(resps_len, activation="softmax"))
 
     X = []
     y = []
@@ -34,7 +35,7 @@ def train(data: str, message: str):
     for key in dset:
         tokens = tokenizer.texts_to_sequences([key,])[0]
         X.append(np.array((list(tokens)+[0,]*inp_len)[:inp_len]))
-        output_array = np.zeros(
+        output_array = np.zeros(resps_len)
         output_array[dset[key]] = 1
         y.append(output_array)
 
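Below is a minimal, self-contained sketch of what the changed code does end to end: compute the number of distinct responses once (resps_len), size the softmax output layer with it, and build one-hot target vectors of the same length. The todset helper and the inp_len value are assumptions for illustration; their real definitions live elsewhere in app.py, so the stub here only mimics the question→answer parsing that the error message in the diff implies.

import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

inp_len = 16  # assumed padded input length; app.py defines its own inp_len

def todset(text: str):
    # Assumed stand-in for app.py's todset: parse "question→answer" lines
    # into ({question: answer_index}, [unique answers in first-seen order]).
    pairs = [line.split("→", 1) for line in text.splitlines() if "→" in line]
    responses = list(dict.fromkeys(answer for _, answer in pairs))
    dset = {q: responses.index(a) for q, a in pairs}
    return dset, responses

data = "hi→hello\nbye→goodbye"
dset, responses = todset(data)
resps_len = len(responses)  # computed once, as in the commit

tokenizer = Tokenizer()
tokenizer.fit_on_texts(list(dset.keys()))

model = Sequential()
model.add(Dense(256, activation="relu", input_shape=(inp_len,)))
model.add(Dense(resps_len, activation="softmax"))  # one output unit per response

X, y = [], []
for key in dset:
    tokens = tokenizer.texts_to_sequences([key])[0]
    X.append(np.array((tokens + [0] * inp_len)[:inp_len]))  # pad/truncate to inp_len
    output_array = np.zeros(resps_len)
    output_array[dset[key]] = 1  # one-hot target: index of this question's answer
    y.append(output_array)

model.compile(loss="categorical_crossentropy", optimizer="adam")
model.fit(np.array(X), np.array(y), epochs=10, verbose=0)

Naming len(responses) once as resps_len keeps the output-layer width and the one-hot target length in sync by construction, rather than relying on two separate expressions staying identical.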