[Embedded Hugging Face Space demo (status: Sleeping)]
import streamlit as st
import transformers

def load_stuff():
    # Load the distilgpt2 model and its tokenizer from the Hugging Face Hub
    model = transformers.AutoModelForCausalLM.from_pretrained("distilgpt2")
    tokenizer = transformers.AutoTokenizer.from_pretrained("distilgpt2")
    return model, tokenizer

st.image("./img.jpg")
model, tokenizer = load_stuff()

user_inputed_text = st.text_input("Insert text")
if len(user_inputed_text) == 0:
    outputs_text = "no text provided. write some text, meatbag"
else:
    # Tokenize the prompt and sample up to 50 new tokens from the model
    outputs = model.generate(
        **tokenizer([user_inputed_text], return_tensors='pt'),
        max_new_tokens=50, do_sample=True,
    )
    outputs_text = tokenizer.decode(outputs[0])

st.write(outputs_text)
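One possible refinement, not part of the original snippet: Streamlit re-executes the whole script on every interaction, so load_stuff() reloads distilgpt2 each time the user edits the text box. Wrapping the loader in st.cache_resource (available in recent Streamlit versions) keeps the model and tokenizer in memory between reruns; a minimal sketch, assuming that decorator is available in your Streamlit version:

import streamlit as st
import transformers

@st.cache_resource  # cache the returned objects across Streamlit reruns
def load_stuff():
    # Now loaded once per process instead of once per interaction
    model = transformers.AutoModelForCausalLM.from_pretrained("distilgpt2")
    tokenizer = transformers.AutoTokenizer.from_pretrained("distilgpt2")
    return model, tokenizer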