# hf_model/app.py — Hugging Face Space by abaliyan (commit f02d1d0, "Update app.py")
import streamlit as st
# Consolidated: the original imported AutoTokenizer twice on two lines.
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer

st.title("Generating Response with HuggingFace Models")
st.markdown("## Model: `facebook/blenderbot-400M-distill`")


@st.cache_resource
def _load_model_and_tokenizer(name: str):
    """Load and cache the seq2seq model and its tokenizer.

    Streamlit reruns the whole script on every user interaction; without
    caching, the 400M-parameter model would be re-downloaded/re-loaded on
    every button click. ``st.cache_resource`` keeps one shared instance.
    """
    return (
        AutoModelForSeq2SeqLM.from_pretrained(name),
        AutoTokenizer.from_pretrained(name),
    )


with st.spinner("Getting this ready for you.."):
    model_name = "facebook/blenderbot-400M-distill"
    model, tokenizer = _load_model_and_tokenizer(model_name)
def get_response(input_text):
    """Generate a chatbot reply for the given user query.

    Parameters
    ----------
    input_text : str
        The user's query text.

    Returns
    -------
    str
        The model's decoded response with surrounding whitespace stripped.
    """
    # Tokenize the input. Calling the tokenizer directly replaces the
    # deprecated encode_plus() and returns the same dict of tensors.
    inputs = tokenizer(input_text, return_tensors="pt")
    # Generate response token ids from the model.
    outputs = model.generate(**inputs)
    # Decode ids back to text, dropping special tokens (e.g. </s>).
    response = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
    return response
# --- UI: query box and response display -----------------------------------
user_input = st.text_area("Enter your query here...")
if st.button("Get Response") and user_input:
    with st.spinner("Generating Response..."):
        answer = get_response(user_input)
    # NOTE(review): get_response always returns a str, so this guard is
    # effectively always true; kept for safety against future changes.
    if answer is not None:
        st.success('Great! Response generated successfully')
        st.write(answer)