import streamlit as st
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

st.title("Generating Response with HuggingFace Models")
st.markdown("## Model: `facebook/blenderbot-400M-distill`")
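
# Download (on first run) and load the seq2seq model and its tokenizer.
# Note: as written, this runs on every Streamlit rerun; wrapping the loading
# in a function decorated with @st.cache_resource would cache it across reruns.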
with st.spinner("Getting this ready for you..."):
    model_name = "facebook/blenderbot-400M-distill"
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)


def get_response(input_text):
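    """Tokenize the user's query, generate a reply, and decode it to text."""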
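    # Tokenize the input and return PyTorch tensors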
    inputs = tokenizer(input_text, return_tensors="pt")
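    # Generate a reply using the model's default decoding settings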
    outputs = model.generate(**inputs)
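    # Decode the generated token IDs back to text, dropping special tokens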
    response = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
    return response


user_input = st.text_area("Enter your query here...")
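
# Generate and show a reply when the button is clicked and the input is non-empty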
if st.button("Get Response") and user_input:
    with st.spinner("Generating Response..."):
        answer = get_response(user_input)
    if answer:
        st.success("Great! Response generated successfully")
        st.write(answer)