fix cache ignore
app.py CHANGED
@@ -27,7 +27,7 @@ def train(data: str, message: str):
 
     vocab_size = len(tokenizer.word_index) + 1
     data_hash = hash_str(data)+".keras"
-    if
+    if data_hash in os.listdir("cache"):
         model = load_model(data_hash)
     else:
         input_layer = Input(shape=(inp_len,))
@@ -68,7 +68,7 @@ def train(data: str, message: str):
     model.compile(loss="sparse_categorical_crossentropy", metrics=["accuracy",])
 
     model.fit(X, y, epochs=16, batch_size=8, workers=4, use_multiprocessing=True)
-    model.save(f"cache/{data_hash}
+    model.save(f"cache/{data_hash}")
     tokens = tokenizer.texts_to_sequences([message,])[0]
     prediction = model.predict(np.array([(list(tokens)+[0,]*inp_len)[:inp_len],]))[0]
     max_o = 0
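
For context, the change matters because the cached model is keyed by a hash of the training data: if the lookup does not actually check the cache/ directory, every request rebuilds and retrains the model from scratch. Below is a minimal sketch of that hash-keyed caching pattern, not the app's actual implementation. The hash_str helper here is an assumed SHA-256 stand-in (the app's own hash_str is not shown in this diff), and get_or_train_model, build_and_train and CACHE_DIR are hypothetical names. Note also that the sketch loads from the full cache/<hash> path, whereas the committed context line calls load_model(data_hash) without the cache/ prefix.

import hashlib
import os

from tensorflow.keras.models import load_model

CACHE_DIR = "cache"  # hypothetical constant; the app hard-codes "cache"


def hash_str(data: str) -> str:
    # Assumed stand-in for the app's hash_str helper: a stable digest of the
    # training data, used as the cache key.
    return hashlib.sha256(data.encode("utf-8")).hexdigest()


def get_or_train_model(data: str, build_and_train):
    # Hypothetical wrapper around the diff's if/else: return a cached model
    # for this exact training data if one was saved earlier, otherwise build,
    # train and cache a fresh one. `build_and_train` is a caller-supplied
    # callable that returns a compiled, fitted Keras model.
    os.makedirs(CACHE_DIR, exist_ok=True)  # os.listdir("cache") fails if the dir is missing
    cache_path = os.path.join(CACHE_DIR, hash_str(data) + ".keras")

    if os.path.exists(cache_path):  # same intent as `data_hash in os.listdir("cache")`
        return load_model(cache_path)  # load from cache/, not the working directory

    model = build_and_train()
    model.save(cache_path)  # the .keras suffix selects the native Keras format
    return model

Checking os.path.exists on the full path is one way to avoid both the FileNotFoundError that os.listdir("cache") raises when cache/ does not yet exist and the cost of scanning the whole directory listing.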