import streamlit as st
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load the pretrained BlenderBot model and its tokenizer.
model_name = "facebook/blenderbot-90M"
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
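# Note: Streamlit re-executes this whole script on every interaction, so the
# lines above reload the model each time a message is sent. A minimal sketch of
# caching the load instead, assuming Streamlit >= 1.18 (where st.cache_resource
# is available):
#
#     @st.cache_resource
#     def load_model(name):
#         return (AutoModelForSeq2SeqLM.from_pretrained(name),
#                 AutoTokenizer.from_pretrained(name))
#
#     model, tokenizer = load_model(model_name)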
def chatbot_response(input_text):
    # Tokenize the user's message, generate a reply of up to 100 tokens,
    # and decode it back into plain text.
    inputs = tokenizer(input_text, return_tensors="pt")
    outputs = model.generate(**inputs, max_length=100)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
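# Replies from the default greedy decoding can be repetitive; generation could
# be varied with sampling, e.g. (parameters assumed, not from the original
# snippet):
#     outputs = model.generate(**inputs, max_length=100, do_sample=True, top_p=0.9)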
# Build the Streamlit UI: a title, a short prompt, a text box for the user's
# message, and a text area that displays the bot's reply.
st.title("Emotional Support Chatbot")
st.write("Chat with the emotional support bot below:")

user_input = st.text_input("Your Message")
if user_input:
    response = chatbot_response(user_input)
    st.text_area("Chatbot Response", response, height=150)
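# To try the app locally, save this script (e.g. as app.py; the filename is
# assumed here) and start it with Streamlit's CLI:
#     streamlit run app.py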