"""Streamlit app: generate Russian text with a fine-tuned ruGPT-3 small model.

Loads the base sberbank-ai/rugpt3small_based_on_gpt2 checkpoint, applies
locally fine-tuned weights from 'model.pt', and exposes prompt/length/
num_samples/temperature controls in a Streamlit UI.
"""

import streamlit as st
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer


@st.cache_resource
def load_model_and_tokenizer():
    """Load model + tokenizer once per server process (cached across reruns).

    Without caching, Streamlit re-executes the whole script on every widget
    interaction and the multi-hundred-MB model would be reloaded each time.
    """
    model = GPT2LMHeadModel.from_pretrained(
        'sberbank-ai/rugpt3small_based_on_gpt2',
        output_attentions=False,
        output_hidden_states=False,
    )
    tokenizer = GPT2Tokenizer.from_pretrained('sberbank-ai/rugpt3small_based_on_gpt2')
    # Attach the saved fine-tuned weights to our model (CPU inference).
    model.load_state_dict(torch.load('model.pt', map_location=torch.device('cpu')))
    # Inference mode: disables dropout so generations are not degraded.
    model.eval()
    return model, tokenizer


model, tokenizer = load_model_and_tokenizer()

prompt = st.text_input('Введите текст prompt:')
length = st.slider('Длина генерируемой последовательности:', 1, 256, 16)
num_samples = st.slider('Число генераций:', 1, 4, 1)
temperature = st.slider('Температура:', 1.0, 5.0, 1.0)


def generate_text(model, tokenizer, prompt, length, num_samples, temperature):
    """Generate `num_samples` continuations of `prompt`.

    Args:
        model: causal LM with a HuggingFace `generate` method.
        tokenizer: matching tokenizer (encode/decode).
        prompt: seed text to continue.
        length: maximum total sequence length in tokens.
        num_samples: number of independent samples to return.
        temperature: sampling temperature (>1.0 = more random).

    Returns:
        List of `num_samples` decoded strings.
    """
    input_ids = tokenizer.encode(prompt, return_tensors='pt')
    # no_grad: inference only — avoids building the autograd graph.
    with torch.no_grad():
        output_sequences = model.generate(
            input_ids=input_ids,
            max_length=length,
            num_return_sequences=num_samples,
            # do_sample=True is required: with greedy decoding the
            # temperature is silently ignored and num_return_sequences > 1
            # raises a ValueError in transformers.
            do_sample=True,
            temperature=temperature,
        )
    return [
        tokenizer.decode(sequence, clean_up_tokenization_spaces=True)
        for sequence in output_sequences
    ]


if st.button('Сгенерировать текст'):
    generated_texts = generate_text(model, tokenizer, prompt, length, num_samples, temperature)
    for i, text in enumerate(generated_texts):
        st.write(f'Текст {i+1}:')
        st.write(text)