Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -2,46 +2,37 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
Before:

import gradio as gr
import torch

-
-title = "????AI ChatBot"
description = "A State-of-the-Art Large-scale Pretrained Response generation model (DialoGPT)"
examples = [["How are you?"]]

-
tokenizer = AutoTokenizer.from_pretrained("betajuned/GPT-2_Kombinasi4")
model = AutoModelForCausalLM.from_pretrained("betajuned/GPT-2_Kombinasi4")

-
def predict(input, history=[]):
-    #
-    new_user_input_ids = tokenizer.encode(
-
-

-    #
-

-    #
-
-        bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id
-    ).tolist()

-    #
-
-    # print('decoded_response-->>'+str(response))
-    response = [
-        (response[i], response[i + 1]) for i in range(5, len(response) - 1, 2)
-    ]  # convert to tuples of list
-    # print('response-->>'+str(response))
-    return response, history


gr.Interface(
    fn=predict,
    title=title,
    description=description,
    examples=examples,
-    inputs=["
-    outputs=["
    theme="finlaymacklon/boxy_violet",
-).launch()
After:

import gradio as gr
import torch

+title = "AI ChatBot"
description = "A State-of-the-Art Large-scale Pretrained Response generation model (DialoGPT)"
examples = [["How are you?"]]

tokenizer = AutoTokenizer.from_pretrained("betajuned/GPT-2_Kombinasi4")
model = AutoModelForCausalLM.from_pretrained("betajuned/GPT-2_Kombinasi4")

def predict(input, history=[]):
+    # Tokenize the new input sentence
+    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors="pt")
+
+    # Append the new user input tokens to the chat history
+    bot_input_ids = torch.cat([torch.tensor(history), new_user_input_ids], dim=-1) if history else new_user_input_ids

+    # Generate a response
+    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)

+    # Convert the tokens to text
+    response = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)

+    # Update the history with the new tokens
+    new_history = chat_history_ids.tolist()

+    return response, new_history

gr.Interface(
    fn=predict,
    title=title,
    description=description,
    examples=examples,
+    inputs=[gr.inputs.Textbox(lines=2, placeholder="Enter your message here..."), "state"],
+    outputs=["text", "state"],
    theme="finlaymacklon/boxy_violet",
+).launch()
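Note: the updated file still builds its UI with the Gradio 2.x-era gr.inputs.Textbox helper and the "text" / "state" string shortcuts. Those were removed in later Gradio releases, which is one plausible cause of the Space's "Runtime error" status. Below is a minimal sketch of the same Interface written against a current Gradio (4.x) API; it is an assumption about a possible fix, not part of this commit, and it leaves examples out because pairing examples with a gr.State input may need separate handling.

import gradio as gr

# Sketch only (assumes Gradio >= 4, where the gr.inputs / gr.outputs modules no longer exist):
# component objects are passed directly, and per-session state uses gr.State.
demo = gr.Interface(
    fn=predict,
    title=title,
    description=description,
    inputs=[gr.Textbox(lines=2, placeholder="Enter your message here..."), gr.State([])],
    outputs=[gr.Textbox(label="Response"), gr.State()],
    theme="finlaymacklon/boxy_violet",  # hub themes can be loaded by name in Gradio >= 3.23
)
demo.launch()

If the Space pins an older gradio version in its requirements, the committed inputs=[...] line may still run as written, and the runtime error would then come from elsewhere.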