File size: 1,389 Bytes
5299cbe
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
import gradio as gr

# Load the model and tokenizer
# BlenderBot 400M (distilled): a small open-domain conversational
# seq2seq model. Both downloads happen once at import time.
model_name = "facebook/blenderbot-400M-distill"
tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
model = BlenderbotForConditionalGeneration.from_pretrained(model_name)

# Initialize message history
# NOTE(review): this module-level list is shared by every session/user of
# the Gradio app; vanilla_chatbot appends to it but never reads it back.
conversation_history = []

# Function to interact with the chatbot
# Function to interact with the chatbot
def vanilla_chatbot(message, history):
    """Generate a single BlenderBot reply to ``message``.

    Parameters
    ----------
    message : str
        The user's latest utterance.
    history : list
        Chat history supplied by Gradio. Currently unused: each reply is
        generated from ``message`` alone, so the bot has no multi-turn
        memory across the conversation.

    Returns
    -------
    str
        The model's decoded reply with special tokens stripped.
    """
    global conversation_history

    # Log the user message (module-level list shared across sessions).
    conversation_history.append(message)

    # Tokenize the input as a PyTorch batch of one. BlenderBot's tokenizer
    # adds its own special tokens; truncation=True guards against messages
    # longer than the model's maximum sequence length, which would
    # otherwise fail inside generate().
    inputs = tokenizer([message], return_tensors='pt', truncation=True)

    # Generate the reply ids and decode the first (only) sequence.
    reply_ids = model.generate(**inputs)
    bot_response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]

    # Log the bot reply as well, mirroring the user-message append above.
    conversation_history.append(bot_response)

    return bot_response

# Create a Gradio interface for the chatbot
# Build the UI pieces first: one multi-line textbox in, one textbox out.
user_box = gr.Textbox(lines=2, placeholder="Enter your message here...")
reply_box = gr.Textbox(placeholder="Bot response will appear here...")

# Wire the chatbot function to the components.
demo_chatbot = gr.Interface(
    fn=vanilla_chatbot,
    inputs=user_box,
    outputs=reply_box,
    title="Mashdemy Chatbot",
    description="Enter text to start chatting.",
)

# Launch the app with a public share link.
demo_chatbot.launch(share=True)