|
import streamlit as st |
|
import transformers |
|
import torch |
|
from transformers import GPT2LMHeadModel, GPT2Tokenizer |
|
|
|
@st.cache_resource
def load_model():
    """Load the base ruGPT-3 small model and its tokenizer.

    Cached by Streamlit so the (slow) downloads/deserialization happen only
    once per server process.

    Returns:
        tuple[GPT2LMHeadModel, GPT2Tokenizer]: the language model and the
        matching tokenizer.
    """
    tokenizer = GPT2Tokenizer.from_pretrained("sberbank-ai/rugpt3small_based_on_gpt2")
    model = GPT2LMHeadModel.from_pretrained(
        "sberbank-ai/rugpt3small_based_on_gpt2",
        output_attentions=False,
        output_hidden_states=False,
    )
    return model, tokenizer
|
|
|
# Load the cached base model/tokenizer, then overwrite the base weights with
# the fine-tuned "Borgesian" checkpoint from the local working directory.
borgesian, tokenizer = load_model()

# map_location forces CPU deserialization so the checkpoint loads even on
# hosts without a GPU (typical for Streamlit deployments).
borgesian.load_state_dict(torch.load('borgesian_weights.pt', map_location=torch.device('cpu')))

borgesian.to('cpu')

# Inference-only mode: disables dropout etc. for deterministic-ish generation.
borgesian.eval()
|
|
|
def generate_response(text, temperature, length, top_p):
    """Generate a continuation of *text* with the fine-tuned model and render it.

    Args:
        text: Russian-language prompt entered by the user.
        temperature: sampling temperature (coerced to float).
        length: maximum total token length of the generated sequence.
        top_p: nucleus-sampling probability mass (coerced to float).

    Side effects:
        Writes the generated text to the Streamlit page via ``st.write``.
    """
    input_ids = tokenizer.encode(text, return_tensors="pt")
    with torch.no_grad():
        out = borgesian.generate(
            input_ids,
            do_sample=True,
            num_beams=2,
            temperature=float(temperature),
            top_p=float(top_p),
            max_length=length,
        )
    # Only one sequence is returned/used; decode it directly instead of
    # decoding every sequence and discarding all but the first.
    generated_text = tokenizer.decode(out[0])
    # Trim at the last full stop so the output does not end mid-sentence.
    # str.rfind returns -1 when no '.' exists; the original code then sliced
    # [:0] and rendered an empty string — fall back to the full text instead.
    last_full_stop_index = generated_text.rfind('.')
    if last_full_stop_index != -1:
        generated_text = generated_text[:last_full_stop_index + 1]
    st.write(generated_text)
|
|
|
# --- Page header -------------------------------------------------------------
st.title('Borgesian')
st.image('borges.jpg')
st.write('Write a prompt in Russian, and the GPT-based model will follow up with a Borgesian text.')
st.write('Define the parameters of generation:')

# --- Generation controls -----------------------------------------------------
# Sliders expose the sampling parameters passed straight to generate_response.
temperature = st.slider('Temperature', value=1.5, min_value=1.0, max_value=5.0, step=0.1)
length = st.slider('Length', value=50, min_value=20, max_value=250, step=1)
top_p = st.slider('Top-p value', value=0.9, min_value=0.5, max_value=1.0, step=0.05)

# --- Prompt input and generation trigger -------------------------------------
user_input = st.text_area("Enter your text:")

if st.button("Send"):
    # Guard clause: warn on empty input, otherwise generate and decorate.
    if not user_input:
        st.warning("Please enter some text.")
    else:
        generate_response(user_input, temperature, length, top_p)
        st.image('penrose_tiling.jpg')
|
|